ZTWHHH committed on
Commit
05effa5
·
verified ·
1 Parent(s): e2eea08

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. parrot/lib/python3.10/site-packages/charset_normalizer-3.3.2.dist-info/INSTALLER +1 -0
  2. parrot/lib/python3.10/site-packages/charset_normalizer-3.3.2.dist-info/LICENSE +21 -0
  3. parrot/lib/python3.10/site-packages/charset_normalizer-3.3.2.dist-info/METADATA +683 -0
  4. parrot/lib/python3.10/site-packages/charset_normalizer-3.3.2.dist-info/RECORD +36 -0
  5. parrot/lib/python3.10/site-packages/charset_normalizer-3.3.2.dist-info/REQUESTED +0 -0
  6. parrot/lib/python3.10/site-packages/charset_normalizer-3.3.2.dist-info/WHEEL +6 -0
  7. parrot/lib/python3.10/site-packages/charset_normalizer-3.3.2.dist-info/top_level.txt +1 -0
  8. parrot/lib/python3.10/site-packages/fsspec/__init__.py +69 -0
  9. parrot/lib/python3.10/site-packages/fsspec/_version.py +16 -0
  10. parrot/lib/python3.10/site-packages/fsspec/archive.py +73 -0
  11. parrot/lib/python3.10/site-packages/fsspec/asyn.py +1096 -0
  12. parrot/lib/python3.10/site-packages/fsspec/caching.py +951 -0
  13. parrot/lib/python3.10/site-packages/fsspec/callbacks.py +324 -0
  14. parrot/lib/python3.10/site-packages/fsspec/compression.py +175 -0
  15. parrot/lib/python3.10/site-packages/fsspec/config.py +131 -0
  16. parrot/lib/python3.10/site-packages/fsspec/conftest.py +55 -0
  17. parrot/lib/python3.10/site-packages/fsspec/core.py +738 -0
  18. parrot/lib/python3.10/site-packages/fsspec/dircache.py +98 -0
  19. parrot/lib/python3.10/site-packages/fsspec/exceptions.py +18 -0
  20. parrot/lib/python3.10/site-packages/fsspec/fuse.py +324 -0
  21. parrot/lib/python3.10/site-packages/fsspec/generic.py +411 -0
  22. parrot/lib/python3.10/site-packages/fsspec/gui.py +414 -0
  23. parrot/lib/python3.10/site-packages/fsspec/implementations/cache_metadata.py +232 -0
  24. parrot/lib/python3.10/site-packages/fsspec/implementations/dask.py +152 -0
  25. parrot/lib/python3.10/site-packages/fsspec/implementations/jupyter.py +124 -0
  26. parrot/lib/python3.10/site-packages/fsspec/implementations/memory.py +303 -0
  27. parrot/lib/python3.10/site-packages/fsspec/implementations/smb.py +343 -0
  28. parrot/lib/python3.10/site-packages/fsspec/mapping.py +251 -0
  29. parrot/lib/python3.10/site-packages/fsspec/parquet.py +541 -0
  30. parrot/lib/python3.10/site-packages/fsspec/registry.py +311 -0
  31. parrot/lib/python3.10/site-packages/fsspec/spec.py +2068 -0
  32. parrot/lib/python3.10/site-packages/fsspec/transaction.py +90 -0
  33. parrot/lib/python3.10/site-packages/fsspec/utils.py +740 -0
  34. parrot/lib/python3.10/site-packages/huggingface_hub/__pycache__/__init__.cpython-310.pyc +0 -0
  35. parrot/lib/python3.10/site-packages/huggingface_hub/__pycache__/_commit_api.cpython-310.pyc +0 -0
  36. parrot/lib/python3.10/site-packages/huggingface_hub/__pycache__/_inference_endpoints.cpython-310.pyc +0 -0
  37. parrot/lib/python3.10/site-packages/huggingface_hub/__pycache__/_login.cpython-310.pyc +0 -0
  38. parrot/lib/python3.10/site-packages/huggingface_hub/__pycache__/_snapshot_download.cpython-310.pyc +0 -0
  39. parrot/lib/python3.10/site-packages/huggingface_hub/__pycache__/_space_api.cpython-310.pyc +0 -0
  40. parrot/lib/python3.10/site-packages/huggingface_hub/__pycache__/_upload_large_folder.cpython-310.pyc +0 -0
  41. parrot/lib/python3.10/site-packages/huggingface_hub/__pycache__/_webhooks_payload.cpython-310.pyc +0 -0
  42. parrot/lib/python3.10/site-packages/huggingface_hub/__pycache__/community.cpython-310.pyc +0 -0
  43. parrot/lib/python3.10/site-packages/huggingface_hub/__pycache__/errors.cpython-310.pyc +0 -0
  44. parrot/lib/python3.10/site-packages/huggingface_hub/__pycache__/fastai_utils.cpython-310.pyc +0 -0
  45. parrot/lib/python3.10/site-packages/huggingface_hub/__pycache__/hub_mixin.cpython-310.pyc +0 -0
  46. parrot/lib/python3.10/site-packages/huggingface_hub/__pycache__/lfs.cpython-310.pyc +0 -0
  47. parrot/lib/python3.10/site-packages/huggingface_hub/__pycache__/repocard.cpython-310.pyc +0 -0
  48. parrot/lib/python3.10/site-packages/huggingface_hub/__pycache__/repocard_data.cpython-310.pyc +0 -0
  49. parrot/lib/python3.10/site-packages/huggingface_hub/__pycache__/repository.cpython-310.pyc +0 -0
  50. parrot/lib/python3.10/site-packages/huggingface_hub/commands/__init__.py +27 -0
parrot/lib/python3.10/site-packages/charset_normalizer-3.3.2.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ pip
parrot/lib/python3.10/site-packages/charset_normalizer-3.3.2.dist-info/LICENSE ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MIT License
2
+
3
+ Copyright (c) 2019 TAHRI Ahmed R.
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
parrot/lib/python3.10/site-packages/charset_normalizer-3.3.2.dist-info/METADATA ADDED
@@ -0,0 +1,683 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.1
2
+ Name: charset-normalizer
3
+ Version: 3.3.2
4
+ Summary: The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet.
5
+ Home-page: https://github.com/Ousret/charset_normalizer
6
+ Author: Ahmed TAHRI
7
+ Author-email: ahmed.tahri@cloudnursery.dev
8
+ License: MIT
9
+ Project-URL: Bug Reports, https://github.com/Ousret/charset_normalizer/issues
10
+ Project-URL: Documentation, https://charset-normalizer.readthedocs.io/en/latest
11
+ Keywords: encoding,charset,charset-detector,detector,normalization,unicode,chardet,detect
12
+ Classifier: Development Status :: 5 - Production/Stable
13
+ Classifier: License :: OSI Approved :: MIT License
14
+ Classifier: Intended Audience :: Developers
15
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
16
+ Classifier: Operating System :: OS Independent
17
+ Classifier: Programming Language :: Python
18
+ Classifier: Programming Language :: Python :: 3
19
+ Classifier: Programming Language :: Python :: 3.7
20
+ Classifier: Programming Language :: Python :: 3.8
21
+ Classifier: Programming Language :: Python :: 3.9
22
+ Classifier: Programming Language :: Python :: 3.10
23
+ Classifier: Programming Language :: Python :: 3.11
24
+ Classifier: Programming Language :: Python :: 3.12
25
+ Classifier: Programming Language :: Python :: Implementation :: PyPy
26
+ Classifier: Topic :: Text Processing :: Linguistic
27
+ Classifier: Topic :: Utilities
28
+ Classifier: Typing :: Typed
29
+ Requires-Python: >=3.7.0
30
+ Description-Content-Type: text/markdown
31
+ License-File: LICENSE
32
+ Provides-Extra: unicode_backport
33
+
34
+ <h1 align="center">Charset Detection, for Everyone 👋</h1>
35
+
36
+ <p align="center">
37
+ <sup>The Real First Universal Charset Detector</sup><br>
38
+ <a href="https://pypi.org/project/charset-normalizer">
39
+ <img src="https://img.shields.io/pypi/pyversions/charset_normalizer.svg?orange=blue" />
40
+ </a>
41
+ <a href="https://pepy.tech/project/charset-normalizer/">
42
+ <img alt="Download Count Total" src="https://static.pepy.tech/badge/charset-normalizer/month" />
43
+ </a>
44
+ <a href="https://bestpractices.coreinfrastructure.org/projects/7297">
45
+ <img src="https://bestpractices.coreinfrastructure.org/projects/7297/badge">
46
+ </a>
47
+ </p>
48
+ <p align="center">
49
+ <sup><i>Featured Packages</i></sup><br>
50
+ <a href="https://github.com/jawah/niquests">
51
+ <img alt="Static Badge" src="https://img.shields.io/badge/Niquests-HTTP_1.1%2C%202%2C_and_3_Client-cyan">
52
+ </a>
53
+ <a href="https://github.com/jawah/wassima">
54
+ <img alt="Static Badge" src="https://img.shields.io/badge/Wassima-Certifi_Killer-cyan">
55
+ </a>
56
+ </p>
57
+ <p align="center">
58
+ <sup><i>In other language (unofficial port - by the community)</i></sup><br>
59
+ <a href="https://github.com/nickspring/charset-normalizer-rs">
60
+ <img alt="Static Badge" src="https://img.shields.io/badge/Rust-red">
61
+ </a>
62
+ </p>
63
+
64
+ > A library that helps you read text from an unknown charset encoding.<br /> Motivated by `chardet`,
65
+ > I'm trying to resolve the issue by taking a new approach.
66
+ > All IANA character set names for which the Python core library provides codecs are supported.
67
+
68
+ <p align="center">
69
+ >>>>> <a href="https://charsetnormalizerweb.ousret.now.sh" target="_blank">👉 Try Me Online Now, Then Adopt Me 👈 </a> <<<<<
70
+ </p>
71
+
72
+ This project offers you an alternative to **Universal Charset Encoding Detector**, also known as **Chardet**.
73
+
74
+ | Feature | [Chardet](https://github.com/chardet/chardet) | Charset Normalizer | [cChardet](https://github.com/PyYoshi/cChardet) |
75
+ |--------------------------------------------------|:---------------------------------------------:|:--------------------------------------------------------------------------------------------------:|:-----------------------------------------------:|
76
+ | `Fast` | ❌ | ✅ | ✅ |
77
+ | `Universal**` | ❌ | ✅ | ❌ |
78
+ | `Reliable` **without** distinguishable standards | ❌ | ✅ | ✅ |
79
+ | `Reliable` **with** distinguishable standards | ✅ | ✅ | ✅ |
80
+ | `License` | LGPL-2.1<br>_restrictive_ | MIT | MPL-1.1<br>_restrictive_ |
81
+ | `Native Python` | ✅ | ✅ | ❌ |
82
+ | `Detect spoken language` | ❌ | ✅ | N/A |
83
+ | `UnicodeDecodeError Safety` | ❌ | ✅ | ❌ |
84
+ | `Whl Size (min)` | 193.6 kB | 42 kB | ~200 kB |
85
+ | `Supported Encoding` | 33 | 🎉 [99](https://charset-normalizer.readthedocs.io/en/latest/user/support.html#supported-encodings) | 40 |
86
+
87
+ <p align="center">
88
+ <img src="https://i.imgflip.com/373iay.gif" alt="Reading Normalized Text" width="226"/><img src="https://media.tenor.com/images/c0180f70732a18b4965448d33adba3d0/tenor.gif" alt="Cat Reading Text" width="200"/>
89
+ </p>
90
+
91
+ *\*\* : They are clearly using specific code for a specific encoding even if covering most of used one*<br>
92
+ Did you get there because of the logs? See [https://charset-normalizer.readthedocs.io/en/latest/user/miscellaneous.html](https://charset-normalizer.readthedocs.io/en/latest/user/miscellaneous.html)
93
+
94
+ ## ⚡ Performance
95
+
96
+ This package offers better performance than its counterpart Chardet. Here are some numbers.
97
+
98
+ | Package | Accuracy | Mean per file (ms) | File per sec (est) |
99
+ |-----------------------------------------------|:--------:|:------------------:|:------------------:|
100
+ | [chardet](https://github.com/chardet/chardet) | 86 % | 200 ms | 5 file/sec |
101
+ | charset-normalizer | **98 %** | **10 ms** | 100 file/sec |
102
+
103
+ | Package | 99th percentile | 95th percentile | 50th percentile |
104
+ |-----------------------------------------------|:---------------:|:---------------:|:---------------:|
105
+ | [chardet](https://github.com/chardet/chardet) | 1200 ms | 287 ms | 23 ms |
106
+ | charset-normalizer | 100 ms | 50 ms | 5 ms |
107
+
108
+ Chardet's performance on larger files (1MB+) is very poor. Expect a huge difference on large payloads.
109
+
110
+ > Stats are generated using 400+ files using default parameters. More details on used files, see GHA workflows.
111
+ > And yes, these results might change at any time. The dataset can be updated to include more files.
112
+ The actual delays heavily depend on your CPU capabilities. The factors should remain the same.
113
+ > Keep in mind that the stats are generous and that Chardet accuracy vs our is measured using Chardet initial capability
114
+ > (eg. Supported Encoding) Challenge-them if you want.
115
+
116
+ ## ✨ Installation
117
+
118
+ Using pip:
119
+
120
+ ```sh
121
+ pip install charset-normalizer -U
122
+ ```
123
+
124
+ ## 🚀 Basic Usage
125
+
126
+ ### CLI
127
+ This package comes with a CLI.
128
+
129
+ ```
130
+ usage: normalizer [-h] [-v] [-a] [-n] [-m] [-r] [-f] [-t THRESHOLD]
131
+ file [file ...]
132
+
133
+ The Real First Universal Charset Detector. Discover originating encoding used
134
+ on text file. Normalize text to unicode.
135
+
136
+ positional arguments:
137
+ files File(s) to be analysed
138
+
139
+ optional arguments:
140
+ -h, --help show this help message and exit
141
+ -v, --verbose Display complementary information about file if any.
142
+ Stdout will contain logs about the detection process.
143
+ -a, --with-alternative
144
+ Output complementary possibilities if any. Top-level
145
+ JSON WILL be a list.
146
+ -n, --normalize Permit to normalize input file. If not set, program
147
+ does not write anything.
148
+ -m, --minimal Only output the charset detected to STDOUT. Disabling
149
+ JSON output.
150
+ -r, --replace Replace file when trying to normalize it instead of
151
+ creating a new one.
152
+ -f, --force Replace file without asking if you are sure, use this
153
+ flag with caution.
154
+ -t THRESHOLD, --threshold THRESHOLD
155
+ Define a custom maximum amount of chaos allowed in
156
+ decoded content. 0. <= chaos <= 1.
157
+ --version Show version information and exit.
158
+ ```
159
+
160
+ ```bash
161
+ normalizer ./data/sample.1.fr.srt
162
+ ```
163
+
164
+ or
165
+
166
+ ```bash
167
+ python -m charset_normalizer ./data/sample.1.fr.srt
168
+ ```
169
+
170
+ 🎉 Since version 1.4.0 the CLI produces an easily usable stdout result in JSON format.
171
+
172
+ ```json
173
+ {
174
+ "path": "/home/default/projects/charset_normalizer/data/sample.1.fr.srt",
175
+ "encoding": "cp1252",
176
+ "encoding_aliases": [
177
+ "1252",
178
+ "windows_1252"
179
+ ],
180
+ "alternative_encodings": [
181
+ "cp1254",
182
+ "cp1256",
183
+ "cp1258",
184
+ "iso8859_14",
185
+ "iso8859_15",
186
+ "iso8859_16",
187
+ "iso8859_3",
188
+ "iso8859_9",
189
+ "latin_1",
190
+ "mbcs"
191
+ ],
192
+ "language": "French",
193
+ "alphabets": [
194
+ "Basic Latin",
195
+ "Latin-1 Supplement"
196
+ ],
197
+ "has_sig_or_bom": false,
198
+ "chaos": 0.149,
199
+ "coherence": 97.152,
200
+ "unicode_path": null,
201
+ "is_preferred": true
202
+ }
203
+ ```
204
+
205
+ ### Python
206
+ *Just print out normalized text*
207
+ ```python
208
+ from charset_normalizer import from_path
209
+
210
+ results = from_path('./my_subtitle.srt')
211
+
212
+ print(str(results.best()))
213
+ ```
214
+
215
+ *Upgrade your code without effort*
216
+ ```python
217
+ from charset_normalizer import detect
218
+ ```
219
+
220
+ The above code will behave the same as **chardet**. We ensure that we offer the best (reasonable) BC result possible.
221
+
222
+ See the docs for advanced usage : [readthedocs.io](https://charset-normalizer.readthedocs.io/en/latest/)
223
+
224
+ ## 😇 Why
225
+
226
+ When I started using Chardet, I noticed that it was not suited to my expectations, and I wanted to propose a
227
+ reliable alternative using a completely different method. Also! I never back down on a good challenge!
228
+
229
+ I **don't care** about the **originating charset** encoding, because **two different tables** can
230
+ produce **two identical rendered strings.**
231
+ What I want is to get readable text, the best I can.
232
+
233
+ In a way, **I'm brute forcing text decoding.** How cool is that ? 😎
234
+
235
+ Don't confuse package **ftfy** with charset-normalizer or chardet. ftfy goal is to repair unicode string whereas charset-normalizer to convert raw file in unknown encoding to unicode.
236
+
237
+ ## 🍰 How
238
+
239
+ - Discard all charset encoding table that could not fit the binary content.
240
+ - Measure noise, or the mess once opened (by chunks) with a corresponding charset encoding.
241
+ - Extract matches with the lowest mess detected.
242
+ - Additionally, we measure coherence / probe for a language.
243
+
244
+ **Wait a minute**, what is noise/mess and coherence according to **YOU ?**
245
+
246
+ *Noise :* I opened hundreds of text files, **written by humans**, with the wrong encoding table. **I observed**, then
247
+ **I established** some ground rules about **what is obvious** when **it seems like** a mess.
248
+ I know that my interpretation of what is noise is probably incomplete, feel free to contribute in order to
249
+ improve or rewrite it.
250
+
251
+ *Coherence :* For each language there is on earth, we have computed ranked letter appearance occurrences (the best we can). So I thought
252
+ that intel is worth something here. So I use those records against decoded text to check if I can detect intelligent design.
253
+
254
+ ## ⚡ Known limitations
255
+
256
+ - Language detection is unreliable when text contains two or more languages sharing identical letters. (eg. HTML (english tags) + Turkish content (Sharing Latin characters))
257
+ - Every charset detector heavily depends on sufficient content. In common cases, do not bother running detection on very tiny content.
258
+
259
+ ## ⚠️ About Python EOLs
260
+
261
+ **If you are running:**
262
+
263
+ - Python >=2.7,<3.5: Unsupported
264
+ - Python 3.5: charset-normalizer < 2.1
265
+ - Python 3.6: charset-normalizer < 3.1
266
+ - Python 3.7: charset-normalizer < 4.0
267
+
268
+ Upgrade your Python interpreter as soon as possible.
269
+
270
+ ## 👤 Contributing
271
+
272
+ Contributions, issues and feature requests are very much welcome.<br />
273
+ Feel free to check [issues page](https://github.com/ousret/charset_normalizer/issues) if you want to contribute.
274
+
275
+ ## 📝 License
276
+
277
+ Copyright © [Ahmed TAHRI @Ousret](https://github.com/Ousret).<br />
278
+ This project is [MIT](https://github.com/Ousret/charset_normalizer/blob/master/LICENSE) licensed.
279
+
280
+ Characters frequencies used in this project © 2012 [Denny Vrandečić](http://simia.net/letters/)
281
+
282
+ ## 💼 For Enterprise
283
+
284
+ Professional support for charset-normalizer is available as part of the [Tidelift
285
+ Subscription][1]. Tidelift gives software development teams a single source for
286
+ purchasing and maintaining their software, with professional grade assurances
287
+ from the experts who know it best, while seamlessly integrating with existing
288
+ tools.
289
+
290
+ [1]: https://tidelift.com/subscription/pkg/pypi-charset-normalizer?utm_source=pypi-charset-normalizer&utm_medium=readme
291
+
292
+ # Changelog
293
+ All notable changes to charset-normalizer will be documented in this file. This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
294
+ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
295
+
296
+ ## [3.3.2](https://github.com/Ousret/charset_normalizer/compare/3.3.1...3.3.2) (2023-10-31)
297
+
298
+ ### Fixed
299
+ - Unintentional memory usage regression when using large payload that match several encoding (#376)
300
+ - Regression on some detection case showcased in the documentation (#371)
301
+
302
+ ### Added
303
+ - Noise (md) probe that identify malformed arabic representation due to the presence of letters in isolated form (credit to my wife)
304
+
305
+ ## [3.3.1](https://github.com/Ousret/charset_normalizer/compare/3.3.0...3.3.1) (2023-10-22)
306
+
307
+ ### Changed
308
+ - Optional mypyc compilation upgraded to version 1.6.1 for Python >= 3.8
309
+ - Improved the general detection reliability based on reports from the community
310
+
311
+ ## [3.3.0](https://github.com/Ousret/charset_normalizer/compare/3.2.0...3.3.0) (2023-09-30)
312
+
313
+ ### Added
314
+ - Allow to execute the CLI (e.g. normalizer) through `python -m charset_normalizer.cli` or `python -m charset_normalizer`
315
+ - Support for 9 forgotten encoding that are supported by Python but unlisted in `encoding.aliases` as they have no alias (#323)
316
+
317
+ ### Removed
318
+ - (internal) Redundant utils.is_ascii function and unused function is_private_use_only
319
+ - (internal) charset_normalizer.assets is moved inside charset_normalizer.constant
320
+
321
+ ### Changed
322
+ - (internal) Unicode code blocks in constants are updated using the latest v15.0.0 definition to improve detection
323
+ - Optional mypyc compilation upgraded to version 1.5.1 for Python >= 3.8
324
+
325
+ ### Fixed
326
+ - Unable to properly sort CharsetMatch when both chaos/noise and coherence were close due to an unreachable condition in \_\_lt\_\_ (#350)
327
+
328
+ ## [3.2.0](https://github.com/Ousret/charset_normalizer/compare/3.1.0...3.2.0) (2023-06-07)
329
+
330
+ ### Changed
331
+ - Typehint for function `from_path` no longer enforce `PathLike` as its first argument
332
+ - Minor improvement over the global detection reliability
333
+
334
+ ### Added
335
+ - Introduce function `is_binary` that relies on main capabilities, and optimized to detect binaries
336
+ - Propagate `enable_fallback` argument throughout `from_bytes`, `from_path`, and `from_fp` that allow a deeper control over the detection (default True)
337
+ - Explicit support for Python 3.12
338
+
339
+ ### Fixed
340
+ - Edge case detection failure where a file would contain 'very-long' camel cased word (Issue #289)
341
+
342
+ ## [3.1.0](https://github.com/Ousret/charset_normalizer/compare/3.0.1...3.1.0) (2023-03-06)
343
+
344
+ ### Added
345
+ - Argument `should_rename_legacy` for legacy function `detect` and disregard any new arguments without errors (PR #262)
346
+
347
+ ### Removed
348
+ - Support for Python 3.6 (PR #260)
349
+
350
+ ### Changed
351
+ - Optional speedup provided by mypy/c 1.0.1
352
+
353
+ ## [3.0.1](https://github.com/Ousret/charset_normalizer/compare/3.0.0...3.0.1) (2022-11-18)
354
+
355
+ ### Fixed
356
+ - Multi-bytes cutter/chunk generator did not always cut correctly (PR #233)
357
+
358
+ ### Changed
359
+ - Speedup provided by mypy/c 0.990 on Python >= 3.7
360
+
361
+ ## [3.0.0](https://github.com/Ousret/charset_normalizer/compare/2.1.1...3.0.0) (2022-10-20)
362
+
363
+ ### Added
364
+ - Extend the capability of explain=True when cp_isolation contains at most two entries (min one), will log in details of the Mess-detector results
365
+ - Support for alternative language frequency set in charset_normalizer.assets.FREQUENCIES
366
+ - Add parameter `language_threshold` in `from_bytes`, `from_path` and `from_fp` to adjust the minimum expected coherence ratio
367
+ - `normalizer --version` now specify if current version provide extra speedup (meaning mypyc compilation whl)
368
+
369
+ ### Changed
370
+ - Build with static metadata using 'build' frontend
371
+ - Make the language detection stricter
372
+ - Optional: Module `md.py` can be compiled using Mypyc to provide an extra speedup up to 4x faster than v2.1
373
+
374
+ ### Fixed
375
+ - CLI with opt --normalize fail when using full path for files
376
+ - TooManyAccentuatedPlugin induce false positive on the mess detection when too few alpha character have been fed to it
377
+ - Sphinx warnings when generating the documentation
378
+
379
+ ### Removed
380
+ - Coherence detector no longer return 'Simple English' instead return 'English'
381
+ - Coherence detector no longer return 'Classical Chinese' instead return 'Chinese'
382
+ - Breaking: Method `first()` and `best()` from CharsetMatch
383
+ - UTF-7 will no longer appear as "detected" without a recognized SIG/mark (is unreliable/conflict with ASCII)
384
+ - Breaking: Class aliases CharsetDetector, CharsetDoctor, CharsetNormalizerMatch and CharsetNormalizerMatches
385
+ - Breaking: Top-level function `normalize`
386
+ - Breaking: Properties `chaos_secondary_pass`, `coherence_non_latin` and `w_counter` from CharsetMatch
387
+ - Support for the backport `unicodedata2`
388
+
389
+ ## [3.0.0rc1](https://github.com/Ousret/charset_normalizer/compare/3.0.0b2...3.0.0rc1) (2022-10-18)
390
+
391
+ ### Added
392
+ - Extend the capability of explain=True when cp_isolation contains at most two entries (min one), will log in details of the Mess-detector results
393
+ - Support for alternative language frequency set in charset_normalizer.assets.FREQUENCIES
394
+ - Add parameter `language_threshold` in `from_bytes`, `from_path` and `from_fp` to adjust the minimum expected coherence ratio
395
+
396
+ ### Changed
397
+ - Build with static metadata using 'build' frontend
398
+ - Make the language detection stricter
399
+
400
+ ### Fixed
401
+ - CLI with opt --normalize fail when using full path for files
402
+ - TooManyAccentuatedPlugin induce false positive on the mess detection when too few alpha character have been fed to it
403
+
404
+ ### Removed
405
+ - Coherence detector no longer return 'Simple English' instead return 'English'
406
+ - Coherence detector no longer return 'Classical Chinese' instead return 'Chinese'
407
+
408
+ ## [3.0.0b2](https://github.com/Ousret/charset_normalizer/compare/3.0.0b1...3.0.0b2) (2022-08-21)
409
+
410
+ ### Added
411
+ - `normalizer --version` now specify if current version provide extra speedup (meaning mypyc compilation whl)
412
+
413
+ ### Removed
414
+ - Breaking: Method `first()` and `best()` from CharsetMatch
415
+ - UTF-7 will no longer appear as "detected" without a recognized SIG/mark (is unreliable/conflict with ASCII)
416
+
417
+ ### Fixed
418
+ - Sphinx warnings when generating the documentation
419
+
420
+ ## [3.0.0b1](https://github.com/Ousret/charset_normalizer/compare/2.1.0...3.0.0b1) (2022-08-15)
421
+
422
+ ### Changed
423
+ - Optional: Module `md.py` can be compiled using Mypyc to provide an extra speedup up to 4x faster than v2.1
424
+
425
+ ### Removed
426
+ - Breaking: Class aliases CharsetDetector, CharsetDoctor, CharsetNormalizerMatch and CharsetNormalizerMatches
427
+ - Breaking: Top-level function `normalize`
428
+ - Breaking: Properties `chaos_secondary_pass`, `coherence_non_latin` and `w_counter` from CharsetMatch
429
+ - Support for the backport `unicodedata2`
430
+
431
+ ## [2.1.1](https://github.com/Ousret/charset_normalizer/compare/2.1.0...2.1.1) (2022-08-19)
432
+
433
+ ### Deprecated
434
+ - Function `normalize` scheduled for removal in 3.0
435
+
436
+ ### Changed
437
+ - Removed useless call to decode in fn is_unprintable (#206)
438
+
439
+ ### Fixed
440
+ - Third-party library (i18n xgettext) crashing not recognizing utf_8 (PEP 263) with underscore from [@aleksandernovikov](https://github.com/aleksandernovikov) (#204)
441
+
442
+ ## [2.1.0](https://github.com/Ousret/charset_normalizer/compare/2.0.12...2.1.0) (2022-06-19)
443
+
444
+ ### Added
445
+ - Output the Unicode table version when running the CLI with `--version` (PR #194)
446
+
447
+ ### Changed
448
+ - Re-use decoded buffer for single byte character sets from [@nijel](https://github.com/nijel) (PR #175)
449
+ - Fixing some performance bottlenecks from [@deedy5](https://github.com/deedy5) (PR #183)
450
+
451
+ ### Fixed
452
+ - Workaround potential bug in cpython with Zero Width No-Break Space located in Arabic Presentation Forms-B, Unicode 1.1 not acknowledged as space (PR #175)
453
+ - CLI default threshold aligned with the API threshold from [@oleksandr-kuzmenko](https://github.com/oleksandr-kuzmenko) (PR #181)
454
+
455
+ ### Removed
456
+ - Support for Python 3.5 (PR #192)
457
+
458
+ ### Deprecated
459
+ - Use of backport unicodedata from `unicodedata2` as Python is quickly catching up, scheduled for removal in 3.0 (PR #194)
460
+
461
+ ## [2.0.12](https://github.com/Ousret/charset_normalizer/compare/2.0.11...2.0.12) (2022-02-12)
462
+
463
+ ### Fixed
464
+ - ASCII miss-detection on rare cases (PR #170)
465
+
466
+ ## [2.0.11](https://github.com/Ousret/charset_normalizer/compare/2.0.10...2.0.11) (2022-01-30)
467
+
468
+ ### Added
469
+ - Explicit support for Python 3.11 (PR #164)
470
+
471
+ ### Changed
472
+ - The logging behavior have been completely reviewed, now using only TRACE and DEBUG levels (PR #163 #165)
473
+
474
+ ## [2.0.10](https://github.com/Ousret/charset_normalizer/compare/2.0.9...2.0.10) (2022-01-04)
475
+
476
+ ### Fixed
477
+ - Fallback match entries might lead to UnicodeDecodeError for large bytes sequence (PR #154)
478
+
479
+ ### Changed
480
+ - Skipping the language-detection (CD) on ASCII (PR #155)
481
+
482
+ ## [2.0.9](https://github.com/Ousret/charset_normalizer/compare/2.0.8...2.0.9) (2021-12-03)
483
+
484
+ ### Changed
485
+ - Moderating the logging impact (since 2.0.8) for specific environments (PR #147)
486
+
487
+ ### Fixed
488
+ - Wrong logging level applied when setting kwarg `explain` to True (PR #146)
489
+
490
+ ## [2.0.8](https://github.com/Ousret/charset_normalizer/compare/2.0.7...2.0.8) (2021-11-24)
491
+ ### Changed
492
+ - Improvement over Vietnamese detection (PR #126)
493
+ - MD improvement on trailing data and long foreign (non-pure latin) data (PR #124)
494
+ - Efficiency improvements in cd/alphabet_languages from [@adbar](https://github.com/adbar) (PR #122)
495
+ - call sum() without an intermediary list following PEP 289 recommendations from [@adbar](https://github.com/adbar) (PR #129)
496
+ - Code style as refactored by Sourcery-AI (PR #131)
497
+ - Minor adjustment on the MD around european words (PR #133)
498
+ - Remove and replace SRTs from assets / tests (PR #139)
499
+ - Initialize the library logger with a `NullHandler` by default from [@nmaynes](https://github.com/nmaynes) (PR #135)
500
+ - Setting kwarg `explain` to True will add provisionally (bounded to function lifespan) a specific stream handler (PR #135)
501
+
502
+ ### Fixed
503
+ - Fix large (misleading) sequence giving UnicodeDecodeError (PR #137)
504
+ - Avoid using too insignificant chunk (PR #137)
505
+
506
+ ### Added
507
+ - Add and expose function `set_logging_handler` to configure a specific StreamHandler from [@nmaynes](https://github.com/nmaynes) (PR #135)
508
+ - Add `CHANGELOG.md` entries, format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) (PR #141)
509
+
510
+ ## [2.0.7](https://github.com/Ousret/charset_normalizer/compare/2.0.6...2.0.7) (2021-10-11)
511
+ ### Added
512
+ - Add support for Kazakh (Cyrillic) language detection (PR #109)
513
+
514
+ ### Changed
515
+ - Further, improve inferring the language from a given single-byte code page (PR #112)
516
+ - Vainly trying to leverage PEP263 when PEP3120 is not supported (PR #116)
517
+ - Refactoring for potential performance improvements in loops from [@adbar](https://github.com/adbar) (PR #113)
518
+ - Various detection improvement (MD+CD) (PR #117)
519
+
520
+ ### Removed
521
+ - Remove redundant logging entry about detected language(s) (PR #115)
522
+
523
+ ### Fixed
524
+ - Fix a minor inconsistency between Python 3.5 and other versions regarding language detection (PR #117 #102)
525
+
526
+ ## [2.0.6](https://github.com/Ousret/charset_normalizer/compare/2.0.5...2.0.6) (2021-09-18)
527
+ ### Fixed
528
+ - Unforeseen regression with the loss of the backward-compatibility with some older minor of Python 3.5.x (PR #100)
529
+ - Fix CLI crash when using --minimal output in certain cases (PR #103)
530
+
531
+ ### Changed
532
+ - Minor improvement to the detection efficiency (less than 1%) (PR #106 #101)
533
+
534
+ ## [2.0.5](https://github.com/Ousret/charset_normalizer/compare/2.0.4...2.0.5) (2021-09-14)
535
+ ### Changed
536
+ - The project now complies with: flake8, mypy, isort and black to ensure a better overall quality (PR #81)
537
+ - The BC-support with v1.x was improved, the old staticmethods are restored (PR #82)
538
+ - The Unicode detection is slightly improved (PR #93)
539
+ - Add syntax sugar \_\_bool\_\_ for results CharsetMatches list-container (PR #91)
540
+
541
+ ### Removed
542
+ - The project no longer raises a warning on tiny content given for detection; it will simply be logged as a warning instead (PR #92)
543
+
544
+ ### Fixed
545
+ - In some rare case, the chunks extractor could cut in the middle of a multi-byte character and could mislead the mess detection (PR #95)
546
+ - Some rare 'space' characters could trip up the UnprintablePlugin/Mess detection (PR #96)
547
+ - The MANIFEST.in was not exhaustive (PR #78)
548
+
549
+ ## [2.0.4](https://github.com/Ousret/charset_normalizer/compare/2.0.3...2.0.4) (2021-07-30)
550
+ ### Fixed
551
+ - The CLI no longer raises an unexpected exception when no encoding has been found (PR #70)
552
+ - Fix accessing the 'alphabets' property when the payload contains surrogate characters (PR #68)
553
+ - The logger could mislead (explain=True) on detected languages and the impact of one MBCS match (PR #72)
554
+ - Submatch factoring could be wrong in rare edge cases (PR #72)
555
+ - Multiple files given to the CLI were ignored when publishing results to STDOUT. (After the first path) (PR #72)
556
+ - Fix line endings from CRLF to LF for certain project files (PR #67)
557
+
558
+ ### Changed
559
+ - Adjust the MD to lower the sensitivity, thus improving the global detection reliability (PR #69 #76)
560
+ - Allow fallback on specified encoding if any (PR #71)
561
+
562
+ ## [2.0.3](https://github.com/Ousret/charset_normalizer/compare/2.0.2...2.0.3) (2021-07-16)
563
+ ### Changed
564
+ - Part of the detection mechanism has been improved to be less sensitive, resulting in more accurate detection results. Especially ASCII. (PR #63)
565
+ - According to the community wishes, the detection will fall back on ASCII or UTF-8 in a last-resort case. (PR #64)
566
+
567
+ ## [2.0.2](https://github.com/Ousret/charset_normalizer/compare/2.0.1...2.0.2) (2021-07-15)
568
+ ### Fixed
569
+ - Empty/Too small JSON payload miss-detection fixed. Report from [@tseaver](https://github.com/tseaver) (PR #59)
570
+
571
+ ### Changed
572
+ - Don't inject unicodedata2 into sys.modules from [@akx](https://github.com/akx) (PR #57)
573
+
574
+ ## [2.0.1](https://github.com/Ousret/charset_normalizer/compare/2.0.0...2.0.1) (2021-07-13)
575
+ ### Fixed
576
+ - Make it work where there isn't a filesystem available, dropping assets frequencies.json. Report from [@sethmlarson](https://github.com/sethmlarson). (PR #55)
577
+ - Using explain=False permanently disable the verbose output in the current runtime (PR #47)
578
+ - One log entry (language target preemptive) was not shown in logs when using explain=True (PR #47)
579
+ - Fix undesired exception (ValueError) on getitem of instance CharsetMatches (PR #52)
580
+
581
+ ### Changed
582
+ - Public function normalize default args values were not aligned with from_bytes (PR #53)
583
+
584
+ ### Added
585
+ - You may now use charset aliases in cp_isolation and cp_exclusion arguments (PR #47)
586
+
587
+ ## [2.0.0](https://github.com/Ousret/charset_normalizer/compare/1.4.1...2.0.0) (2021-07-02)
588
+ ### Changed
589
+ - 4x to 5 times faster than the previous 1.4.0 release. At least 2x faster than Chardet.
590
+ - Accent has been made on UTF-8 detection, should perform rather instantaneous.
591
+ - The backward compatibility with Chardet has been greatly improved. The legacy detect function returns an identical charset name whenever possible.
592
+ - The detection mechanism has been slightly improved, now Turkish content is detected correctly (most of the time)
593
+ - The program has been rewritten to ease the readability and maintainability. (+Using static typing)+
594
+ - utf_7 detection has been reinstated.
595
+
596
+ ### Removed
597
+ - This package no longer requires anything when used with Python 3.5 (Dropped cached_property)
598
+ - Removed support for these languages: Catalan, Esperanto, Kazakh, Basque, Volapük, Azeri, Galician, Nynorsk, Macedonian, and Serbocroatian.
599
+ - The exception hook on UnicodeDecodeError has been removed.
600
+
601
+ ### Deprecated
602
+ - Methods coherence_non_latin, w_counter, chaos_secondary_pass of the class CharsetMatch are now deprecated and scheduled for removal in v3.0
603
+
604
+ ### Fixed
605
+ - The CLI output used the relative path of the file(s). Should be absolute.
606
+
607
+ ## [1.4.1](https://github.com/Ousret/charset_normalizer/compare/1.4.0...1.4.1) (2021-05-28)
608
+ ### Fixed
609
+ - Logger configuration/usage no longer conflict with others (PR #44)
610
+
611
+ ## [1.4.0](https://github.com/Ousret/charset_normalizer/compare/1.3.9...1.4.0) (2021-05-21)
612
+ ### Removed
613
+ - Using standard logging instead of using the package loguru.
614
+ - Dropping nose test framework in favor of the maintained pytest.
615
+ - Choose to not use dragonmapper package to help with gibberish Chinese/CJK text.
616
+ - Require cached_property only for Python 3.5 due to constraint. Dropping for every other interpreter version.
617
+ - Stop support for UTF-7 that does not contain a SIG.
618
+ - Dropping PrettyTable, replaced with pure JSON output in CLI.
619
+
620
+ ### Fixed
621
+ - BOM marker in a CharsetNormalizerMatch instance could be False in rare cases even if obviously present. Due to the sub-match factoring process.
622
+ - Not searching properly for the BOM when trying utf32/16 parent codec.
623
+
624
+ ### Changed
625
+ - Improving the package final size by compressing frequencies.json.
626
+ - Huge improvement over the larges payload.
627
+
628
+ ### Added
629
+ - CLI now produces JSON consumable output.
630
+ - Return ASCII if given sequences fit. Given reasonable confidence.
631
+
632
+ ## [1.3.9](https://github.com/Ousret/charset_normalizer/compare/1.3.8...1.3.9) (2021-05-13)
633
+
634
+ ### Fixed
635
+ - In some very rare cases, you may end up getting encode/decode errors due to a bad bytes payload (PR #40)
636
+
637
+ ## [1.3.8](https://github.com/Ousret/charset_normalizer/compare/1.3.7...1.3.8) (2021-05-12)
638
+
639
+ ### Fixed
640
+ - Empty given payload for detection may cause an exception if trying to access the `alphabets` property. (PR #39)
641
+
642
+ ## [1.3.7](https://github.com/Ousret/charset_normalizer/compare/1.3.6...1.3.7) (2021-05-12)
643
+
644
+ ### Fixed
645
+ - The legacy detect function should return UTF-8-SIG if sig is present in the payload. (PR #38)
646
+
647
+ ## [1.3.6](https://github.com/Ousret/charset_normalizer/compare/1.3.5...1.3.6) (2021-02-09)
648
+
649
+ ### Changed
650
+ - Amend the previous release to allow prettytable 2.0 (PR #35)
651
+
652
+ ## [1.3.5](https://github.com/Ousret/charset_normalizer/compare/1.3.4...1.3.5) (2021-02-08)
653
+
654
+ ### Fixed
655
+ - Fix error while using the package with a python pre-release interpreter (PR #33)
656
+
657
+ ### Changed
658
+ - Dependencies refactoring, constraints revised.
659
+
660
+ ### Added
661
+ - Add python 3.9 and 3.10 to the supported interpreters
662
+
663
+ MIT License
664
+
665
+ Copyright (c) 2019 TAHRI Ahmed R.
666
+
667
+ Permission is hereby granted, free of charge, to any person obtaining a copy
668
+ of this software and associated documentation files (the "Software"), to deal
669
+ in the Software without restriction, including without limitation the rights
670
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
671
+ copies of the Software, and to permit persons to whom the Software is
672
+ furnished to do so, subject to the following conditions:
673
+
674
+ The above copyright notice and this permission notice shall be included in all
675
+ copies or substantial portions of the Software.
676
+
677
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
678
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
679
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
680
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
681
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
682
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
683
+ SOFTWARE.
parrot/lib/python3.10/site-packages/charset_normalizer-3.3.2.dist-info/RECORD ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ../../../bin/normalizer,sha256=gmkYfJ990pdRvXncZz-r9UkVQvFeTmea_7eWafR4r-s,245
2
+ charset_normalizer-3.3.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
3
+ charset_normalizer-3.3.2.dist-info/LICENSE,sha256=6zGgxaT7Cbik4yBV0lweX5w1iidS_vPNcgIT0cz-4kE,1070
4
+ charset_normalizer-3.3.2.dist-info/METADATA,sha256=cfLhl5A6SI-F0oclm8w8ux9wshL1nipdeCdVnYb4AaA,33550
5
+ charset_normalizer-3.3.2.dist-info/RECORD,,
6
+ charset_normalizer-3.3.2.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
7
+ charset_normalizer-3.3.2.dist-info/WHEEL,sha256=cD39NF6a3hkhaWoPQJng7gnGZRIfQsUCtwcedITCPtg,152
8
+ charset_normalizer-3.3.2.dist-info/entry_points.txt,sha256=ADSTKrkXZ3hhdOVFi6DcUEHQRS0xfxDIE_pEz4wLIXA,65
9
+ charset_normalizer-3.3.2.dist-info/top_level.txt,sha256=7ASyzePr8_xuZWJsnqJjIBtyV8vhEo0wBCv1MPRRi3Q,19
10
+ charset_normalizer/__init__.py,sha256=UzI3xC8PhmcLRMzSgPb6minTmRq0kWznnCBJ8ZCc2XI,1577
11
+ charset_normalizer/__main__.py,sha256=JxY8bleaENOFlLRb9HfoeZCzAMnn2A1oGR5Xm2eyqg0,73
12
+ charset_normalizer/__pycache__/__init__.cpython-310.pyc,,
13
+ charset_normalizer/__pycache__/__main__.cpython-310.pyc,,
14
+ charset_normalizer/__pycache__/api.cpython-310.pyc,,
15
+ charset_normalizer/__pycache__/cd.cpython-310.pyc,,
16
+ charset_normalizer/__pycache__/constant.cpython-310.pyc,,
17
+ charset_normalizer/__pycache__/legacy.cpython-310.pyc,,
18
+ charset_normalizer/__pycache__/md.cpython-310.pyc,,
19
+ charset_normalizer/__pycache__/models.cpython-310.pyc,,
20
+ charset_normalizer/__pycache__/utils.cpython-310.pyc,,
21
+ charset_normalizer/__pycache__/version.cpython-310.pyc,,
22
+ charset_normalizer/api.py,sha256=WOlWjy6wT8SeMYFpaGbXZFN1TMXa-s8vZYfkL4G29iQ,21097
23
+ charset_normalizer/cd.py,sha256=xwZliZcTQFA3jU0c00PRiu9MNxXTFxQkFLWmMW24ZzI,12560
24
+ charset_normalizer/cli/__init__.py,sha256=D5ERp8P62llm2FuoMzydZ7d9rs8cvvLXqE-1_6oViPc,100
25
+ charset_normalizer/cli/__main__.py,sha256=2F-xURZJzo063Ye-2RLJ2wcmURpbKeAzKwpiws65dAs,9744
26
+ charset_normalizer/cli/__pycache__/__init__.cpython-310.pyc,,
27
+ charset_normalizer/cli/__pycache__/__main__.cpython-310.pyc,,
28
+ charset_normalizer/constant.py,sha256=p0IsOVcEbPWYPOdWhnhRbjK1YVBy6fs05C5vKC-zoxU,40481
29
+ charset_normalizer/legacy.py,sha256=T-QuVMsMeDiQEk8WSszMrzVJg_14AMeSkmHdRYhdl1k,2071
30
+ charset_normalizer/md.cpython-310-x86_64-linux-gnu.so,sha256=Y7QSLD5QLoSFAWys0-tL7R6QB7oi5864zM6zr7RWek4,16064
31
+ charset_normalizer/md.py,sha256=NkSuVLK13_a8c7BxZ4cGIQ5vOtGIWOdh22WZEvjp-7U,19624
32
+ charset_normalizer/md__mypyc.cpython-310-x86_64-linux-gnu.so,sha256=y2N-LgwRp7TCdgRqsmIM8UvKeavC0t8kx_hdRvaSfcY,268472
33
+ charset_normalizer/models.py,sha256=I5i0s4aKCCgLPY2tUY3pwkgFA-BUbbNxQ7hVkVTt62s,11624
34
+ charset_normalizer/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
35
+ charset_normalizer/utils.py,sha256=teiosMqzKjXyAHXnGdjSBOgnBZwx-SkBbCLrx0UXy8M,11894
36
+ charset_normalizer/version.py,sha256=iHKUfHD3kDRSyrh_BN2ojh43TA5-UZQjvbVIEFfpHDs,79
parrot/lib/python3.10/site-packages/charset_normalizer-3.3.2.dist-info/REQUESTED ADDED
File without changes
parrot/lib/python3.10/site-packages/charset_normalizer-3.3.2.dist-info/WHEEL ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: bdist_wheel (0.41.2)
3
+ Root-Is-Purelib: false
4
+ Tag: cp310-cp310-manylinux_2_17_x86_64
5
+ Tag: cp310-cp310-manylinux2014_x86_64
6
+
parrot/lib/python3.10/site-packages/charset_normalizer-3.3.2.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ charset_normalizer
parrot/lib/python3.10/site-packages/fsspec/__init__.py ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from importlib.metadata import entry_points
2
+
3
+ from . import caching
4
+ from ._version import __version__ # noqa: F401
5
+ from .callbacks import Callback
6
+ from .compression import available_compressions
7
+ from .core import get_fs_token_paths, open, open_files, open_local, url_to_fs
8
+ from .exceptions import FSTimeoutError
9
+ from .mapping import FSMap, get_mapper
10
+ from .registry import (
11
+ available_protocols,
12
+ filesystem,
13
+ get_filesystem_class,
14
+ register_implementation,
15
+ registry,
16
+ )
17
+ from .spec import AbstractFileSystem
18
+
19
+ __all__ = [
20
+ "AbstractFileSystem",
21
+ "FSTimeoutError",
22
+ "FSMap",
23
+ "filesystem",
24
+ "register_implementation",
25
+ "get_filesystem_class",
26
+ "get_fs_token_paths",
27
+ "get_mapper",
28
+ "open",
29
+ "open_files",
30
+ "open_local",
31
+ "registry",
32
+ "caching",
33
+ "Callback",
34
+ "available_protocols",
35
+ "available_compressions",
36
+ "url_to_fs",
37
+ ]
38
+
39
+
40
def process_entries():
    """Register filesystem implementations advertised through entry points.

    Scans the ``fsspec.specs`` entry-point group and registers each named
    implementation, skipping duplicate names within the scan and clobbering
    any protocol of the same name that was registered earlier.
    """
    if entry_points is not None:
        try:
            eps = entry_points()
        except TypeError:
            pass  # importlib-metadata < 0.8
        else:
            if hasattr(eps, "select"):  # Python 3.10+ / importlib_metadata >= 3.9.0
                specs = eps.select(group="fsspec.specs")
            else:
                specs = eps.get("fsspec.specs", [])
            seen = set()
            for spec in specs:
                name = spec.name
                if name in seen:
                    continue
                seen.add(name)
                register_implementation(
                    name,
                    spec.value.replace(":", "."),
                    errtxt=f"Unable to load filesystem from {spec}",
                    # We take our implementations as the ones to overload with if
                    # for some reason we encounter some, may be the same, already
                    # registered
                    clobber=True,
                )


process_entries()
parrot/lib/python3.10/site-packages/fsspec/_version.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# file generated by setuptools_scm
# don't change, don't track in version control

# Constant-False flag so type checkers can see a precise alias for the
# version tuple without importing ``typing`` at runtime.
TYPE_CHECKING = False
if TYPE_CHECKING:
    from typing import Tuple, Union

    VERSION_TUPLE = Tuple[Union[int, str], ...]
else:
    VERSION_TUPLE = object

# Public annotations for the exported version metadata.
version: str
__version__: str
__version_tuple__: VERSION_TUPLE
version_tuple: VERSION_TUPLE

version = __version__ = '2024.6.1'
version_tuple = __version_tuple__ = (2024, 6, 1)
parrot/lib/python3.10/site-packages/fsspec/archive.py ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fsspec import AbstractFileSystem
2
+ from fsspec.utils import tokenize
3
+
4
+
5
class AbstractArchiveFileSystem(AbstractFileSystem):
    """
    A generic superclass for implementing Archive-based filesystems.

    Currently, it is shared amongst
    :class:`~fsspec.implementations.zip.ZipFileSystem`,
    :class:`~fsspec.implementations.libarchive.LibArchiveFileSystem` and
    :class:`~fsspec.implementations.tar.TarFileSystem`.
    """

    def __str__(self):
        return f"<Archive-like object {type(self).__name__} at {id(self)}>"

    __repr__ = __str__

    def ukey(self, path):
        # Key derived from the path, the backing file object and the protocol.
        return tokenize(path, self.fo, self.protocol)

    def _all_dirnames(self, paths):
        """Returns *all* directory names for each path in paths, including intermediate
        ones.

        Parameters
        ----------
        paths: Iterable of path strings
        """
        if len(paths) == 0:
            return set()

        parents = {self._parent(p) for p in paths} - {self.root_marker}
        # recurse so that grandparent directories are included too
        return parents | self._all_dirnames(parents)

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in {"", "/"} and self.dir_cache:
            # synthetic entry for the archive root
            return {"name": "", "type": "directory", "size": 0}
        # entries may be cached with or without a trailing slash
        for candidate in (path, path + "/"):
            if candidate in self.dir_cache:
                return self.dir_cache[candidate]
        raise FileNotFoundError(path)

    def ls(self, path, detail=True, **kwargs):
        self._get_dirs()
        entries = {}
        target = path.rstrip("/")
        for name, info in self.dir_cache.items():
            name = name.rstrip("/")
            parent = name.rsplit("/", 1)[0] if "/" in name else ""
            if parent == target:
                # direct child of the requested directory
                entries[name] = info
            elif all(
                a == b
                for a, b in zip(path.split("/"), [""] + name.strip("/").split("/"))
            ):
                # implied root/intermediate directory entry
                top = name.split("/", 1)[0]
                if top not in entries:
                    entries[top] = {"name": top, "size": 0, "type": "directory"}
        if detail:
            return sorted(entries.values(), key=lambda e: e["name"])
        return sorted(entries)
parrot/lib/python3.10/site-packages/fsspec/asyn.py ADDED
@@ -0,0 +1,1096 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import asyncio
2
+ import asyncio.events
3
+ import functools
4
+ import inspect
5
+ import io
6
+ import numbers
7
+ import os
8
+ import re
9
+ import threading
10
+ from contextlib import contextmanager
11
+ from glob import has_magic
12
+ from typing import TYPE_CHECKING, Iterable
13
+
14
+ from .callbacks import DEFAULT_CALLBACK
15
+ from .exceptions import FSTimeoutError
16
+ from .implementations.local import LocalFileSystem, make_path_posix, trailing_sep
17
+ from .spec import AbstractBufferedFile, AbstractFileSystem
18
+ from .utils import glob_translate, is_exception, other_paths
19
+
20
+ private = re.compile("_[^_]")
21
+ iothread = [None] # dedicated fsspec IO thread
22
+ loop = [None] # global event loop for any non-async instance
23
+ _lock = None # global lock placeholder
24
+ get_running_loop = asyncio.get_running_loop
25
+
26
+
27
def get_lock():
    """Allocate or return a threading lock.

    The lock is allocated on first use to allow setting one lock per forked process.
    """
    global _lock
    if _lock is None:
        _lock = threading.Lock()
    return _lock
36
+
37
+
38
def reset_lock():
    """Reset the global lock.

    This should be called only on the init of a forked process to reset the lock to
    None, enabling the new forked process to get a new lock.
    """
    global _lock

    # The IO thread and event loop belong to the parent process and are
    # unusable after a fork, so drop them as well.
    iothread[0] = loop[0] = None
    _lock = None
49
+
50
+
51
async def _runner(event, coro, result, timeout=None):
    """Await ``coro`` and store its outcome (value or exception) in ``result[0]``.

    ``event`` is set once the coroutine has finished, whatever the outcome.
    A falsy ``timeout`` (0, 0.0 or None) means no time limit.
    """
    if timeout:  # convert 0 or 0.0 to "no timeout"
        coro = asyncio.wait_for(coro, timeout=timeout)
    try:
        result[0] = await coro
    except Exception as ex:
        result[0] = ex
    finally:
        event.set()
61
+
62
+
63
def sync(loop, func, *args, timeout=None, **kwargs):
    """Block until the coroutine ``func(*args, **kwargs)`` completes on ``loop``.

    ``loop`` is expected to be running on another thread (see ``get_loop``).
    A falsy ``timeout`` (0, 0.0 or None) means wait indefinitely; otherwise
    ``FSTimeoutError`` is raised once it expires.

    Examples
    --------
    >>> fsspec.asyn.sync(fsspec.asyn.get_loop(), func, *args,
                 timeout=timeout, **kwargs)
    """
    timeout = timeout or None  # convert 0 or 0.0 to None
    # NB: if the loop is not running *yet*, it is OK to submit work
    # and we will wait for it
    if loop is None or loop.is_closed():
        raise RuntimeError("Loop is not running")
    try:
        if asyncio.events.get_running_loop() is loop:
            raise NotImplementedError("Calling sync() from within a running loop")
    except NotImplementedError:
        raise
    except RuntimeError:
        # no loop running in the current thread -- the expected situation
        pass
    coro = func(*args, **kwargs)
    result = [None]
    event = threading.Event()
    asyncio.run_coroutine_threadsafe(_runner(event, coro, result, timeout), loop)
    while not event.wait(1):
        # wake up once per second so this thread stays interruptible
        if timeout is not None:
            timeout -= 1
            if timeout < 0:
                raise FSTimeoutError

    outcome = result[0]
    if isinstance(outcome, asyncio.TimeoutError):
        # suppress asyncio.TimeoutError, raise FSTimeoutError
        raise FSTimeoutError from outcome
    if isinstance(outcome, BaseException):
        raise outcome
    return outcome
106
+
107
+
108
def sync_wrapper(func, obj=None):
    """Given a function, make so can be called in blocking contexts

    Leave obj=None if defining within a class. Pass the instance if attaching
    as an attribute of the instance.
    """

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # the owning instance supplies the event loop to run on
        instance = obj or args[0]
        return sync(instance.loop, func, *args, **kwargs)

    return wrapper
121
+
122
+
123
@contextmanager
def _selector_policy():
    """Temporarily switch to the selector event-loop policy on Windows.

    The previous policy is always restored on exit, even if the body raises.
    """
    saved_policy = asyncio.get_event_loop_policy()
    try:
        if os.name == "nt" and hasattr(asyncio, "WindowsSelectorEventLoopPolicy"):
            asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())

        yield
    finally:
        asyncio.set_event_loop_policy(saved_policy)
133
+
134
+
135
def get_loop():
    """Create or return the default fsspec IO loop

    The loop will be running on a separate thread.
    """
    if loop[0] is None:
        with get_lock():
            # double-check: another thread may have created the loop while
            # we were waiting for the lock
            if loop[0] is None:
                with _selector_policy():
                    loop[0] = asyncio.new_event_loop()
                thread = threading.Thread(target=loop[0].run_forever, name="fsspecIO")
                thread.daemon = True
                thread.start()
                iothread[0] = thread
    return loop[0]
152
+
153
+
154
+ if TYPE_CHECKING:
155
+ import resource
156
+
157
+ ResourceError = resource.error
158
+ else:
159
+ try:
160
+ import resource
161
+ except ImportError:
162
+ resource = None
163
+ ResourceError = OSError
164
+ else:
165
+ ResourceError = getattr(resource, "error", OSError)
166
+
167
+ _DEFAULT_BATCH_SIZE = 128
168
+ _NOFILES_DEFAULT_BATCH_SIZE = 1280
169
+
170
+
171
def _get_batch_size(nofiles=False):
    """Return how many coroutines to run concurrently in one batch.

    A value from the fsspec configuration takes precedence. Otherwise the
    limit is a fixed default for no-file operations, or is derived from the
    process's open-file rlimit when local files are involved.
    """
    from fsspec.config import conf

    key = "nofiles_gather_batch_size" if nofiles else "gather_batch_size"
    if key in conf:
        return conf[key]
    if nofiles:
        return _NOFILES_DEFAULT_BATCH_SIZE
    if resource is None:
        return _DEFAULT_BATCH_SIZE

    try:
        soft_limit, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
    except (ImportError, ValueError, ResourceError):
        return _DEFAULT_BATCH_SIZE

    if soft_limit == resource.RLIM_INFINITY:
        return -1  # no throttling
    return soft_limit // 8
194
+
195
+
196
def running_async() -> bool:
    """Being executed by an event loop?"""
    try:
        asyncio.get_running_loop()
    except RuntimeError:
        return False
    return True
203
+
204
+
205
async def _run_coros_in_chunks(
    coros,
    batch_size=None,
    callback=DEFAULT_CALLBACK,
    timeout=None,
    return_exceptions=False,
    nofiles=False,
):
    """Run the given coroutines in chunks.

    Parameters
    ----------
    coros: list of coroutines to run
    batch_size: int or None
        Number of coroutines to submit/wait on simultaneously.
        If -1, then it will not be any throttling. If
        None, it will be inferred from _get_batch_size()
    callback: fsspec.callbacks.Callback instance
        Gets a relative_update when each coroutine completes
    timeout: number or None
        If given, each coroutine times out after this time. Note that, since
        there are multiple batches, the total run time of this function will in
        general be longer
    return_exceptions: bool
        Same meaning as in asyncio.gather
    nofiles: bool
        If inferring the batch_size, does this operation involve local files?
        If yes, you normally expect smaller batches.
    """
    if batch_size is None:
        batch_size = _get_batch_size(nofiles=nofiles)

    if batch_size == -1:
        batch_size = len(coros)

    assert batch_size > 0

    async def _tracked(coro, index):
        # Pair each result with its position so results come back in input
        # order, and report progress as soon as the coroutine finishes.
        try:
            return await asyncio.wait_for(coro, timeout=timeout), index
        except Exception as exc:
            if not return_exceptions:
                raise
            return exc, index
        finally:
            callback.relative_update(1)

    total = len(coros)
    results = [None] * total
    submitted = 0
    in_flight = set()

    while in_flight or submitted < total:
        # keep the window of concurrently-running tasks topped up
        while len(in_flight) < batch_size and submitted < total:
            in_flight.add(asyncio.ensure_future(_tracked(coros[submitted], submitted)))
            submitted += 1

        if not in_flight:
            break

        done, in_flight = await asyncio.wait(
            in_flight, return_when=asyncio.FIRST_COMPLETED
        )
        while done:
            value, index = await done.pop()
            results[index] = value

    return results
272
+
273
+
274
# Methods that an async-capable backend should implement as coroutines; the
# un-underscored synchronous versions are mirrored from these names.
async_methods = [
    "_ls",
    "_cat_file",
    "_get_file",
    "_put_file",
    "_rm_file",
    "_cp_file",
    "_pipe_file",
    "_expand_path",
    "_info",
    "_isfile",
    "_isdir",
    "_exists",
    "_walk",
    "_glob",
    "_find",
    "_du",
    "_size",
    "_mkdir",
    "_makedirs",
]
296
+
297
+
298
+ class AsyncFileSystem(AbstractFileSystem):
299
+ """Async file operations, default implementations
300
+
301
+ Passes bulk operations to asyncio.gather for concurrent operation.
302
+
303
+ Implementations that have concurrent batch operations and/or async methods
304
+ should inherit from this class instead of AbstractFileSystem. Docstrings are
305
+ copied from the un-underscored method in AbstractFileSystem, if not given.
306
+ """
307
+
308
+ # note that methods do not have docstring here; they will be copied
309
+ # for _* methods and inferred for overridden methods.
310
+
311
+ async_impl = True
312
+ mirror_sync_methods = True
313
+ disable_throttling = False
314
+
315
+ def __init__(self, *args, asynchronous=False, loop=None, batch_size=None, **kwargs):
316
+ self.asynchronous = asynchronous
317
+ self._pid = os.getpid()
318
+ if not asynchronous:
319
+ self._loop = loop or get_loop()
320
+ else:
321
+ self._loop = None
322
+ self.batch_size = batch_size
323
+ super().__init__(*args, **kwargs)
324
+
325
+ @property
326
+ def loop(self):
327
+ if self._pid != os.getpid():
328
+ raise RuntimeError("This class is not fork-safe")
329
+ return self._loop
330
+
331
+ async def _rm_file(self, path, **kwargs):
332
+ raise NotImplementedError
333
+
334
+ async def _rm(self, path, recursive=False, batch_size=None, **kwargs):
335
+ # TODO: implement on_error
336
+ batch_size = batch_size or self.batch_size
337
+ path = await self._expand_path(path, recursive=recursive)
338
+ return await _run_coros_in_chunks(
339
+ [self._rm_file(p, **kwargs) for p in reversed(path)],
340
+ batch_size=batch_size,
341
+ nofiles=True,
342
+ )
343
+
344
+ async def _cp_file(self, path1, path2, **kwargs):
345
+ raise NotImplementedError
346
+
347
+ async def _copy(
348
+ self,
349
+ path1,
350
+ path2,
351
+ recursive=False,
352
+ on_error=None,
353
+ maxdepth=None,
354
+ batch_size=None,
355
+ **kwargs,
356
+ ):
357
+ if on_error is None and recursive:
358
+ on_error = "ignore"
359
+ elif on_error is None:
360
+ on_error = "raise"
361
+
362
+ if isinstance(path1, list) and isinstance(path2, list):
363
+ # No need to expand paths when both source and destination
364
+ # are provided as lists
365
+ paths1 = path1
366
+ paths2 = path2
367
+ else:
368
+ source_is_str = isinstance(path1, str)
369
+ paths1 = await self._expand_path(
370
+ path1, maxdepth=maxdepth, recursive=recursive
371
+ )
372
+ if source_is_str and (not recursive or maxdepth is not None):
373
+ # Non-recursive glob does not copy directories
374
+ paths1 = [
375
+ p for p in paths1 if not (trailing_sep(p) or await self._isdir(p))
376
+ ]
377
+ if not paths1:
378
+ return
379
+
380
+ source_is_file = len(paths1) == 1
381
+ dest_is_dir = isinstance(path2, str) and (
382
+ trailing_sep(path2) or await self._isdir(path2)
383
+ )
384
+
385
+ exists = source_is_str and (
386
+ (has_magic(path1) and source_is_file)
387
+ or (not has_magic(path1) and dest_is_dir and not trailing_sep(path1))
388
+ )
389
+ paths2 = other_paths(
390
+ paths1,
391
+ path2,
392
+ exists=exists,
393
+ flatten=not source_is_str,
394
+ )
395
+
396
+ batch_size = batch_size or self.batch_size
397
+ coros = [self._cp_file(p1, p2, **kwargs) for p1, p2 in zip(paths1, paths2)]
398
+ result = await _run_coros_in_chunks(
399
+ coros, batch_size=batch_size, return_exceptions=True, nofiles=True
400
+ )
401
+
402
+ for ex in filter(is_exception, result):
403
+ if on_error == "ignore" and isinstance(ex, FileNotFoundError):
404
+ continue
405
+ raise ex
406
+
407
+ async def _pipe_file(self, path, value, **kwargs):
408
+ raise NotImplementedError
409
+
410
+ async def _pipe(self, path, value=None, batch_size=None, **kwargs):
411
+ if isinstance(path, str):
412
+ path = {path: value}
413
+ batch_size = batch_size or self.batch_size
414
+ return await _run_coros_in_chunks(
415
+ [self._pipe_file(k, v, **kwargs) for k, v in path.items()],
416
+ batch_size=batch_size,
417
+ nofiles=True,
418
+ )
419
+
420
+ async def _process_limits(self, url, start, end):
421
+ """Helper for "Range"-based _cat_file"""
422
+ size = None
423
+ suff = False
424
+ if start is not None and start < 0:
425
+ # if start is negative and end None, end is the "suffix length"
426
+ if end is None:
427
+ end = -start
428
+ start = ""
429
+ suff = True
430
+ else:
431
+ size = size or (await self._info(url))["size"]
432
+ start = size + start
433
+ elif start is None:
434
+ start = 0
435
+ if not suff:
436
+ if end is not None and end < 0:
437
+ if start is not None:
438
+ size = size or (await self._info(url))["size"]
439
+ end = size + end
440
+ elif end is None:
441
+ end = ""
442
+ if isinstance(end, numbers.Integral):
443
+ end -= 1 # bytes range is inclusive
444
+ return f"bytes={start}-{end}"
445
+
446
+ async def _cat_file(self, path, start=None, end=None, **kwargs):
447
+ raise NotImplementedError
448
+
449
+ async def _cat(
450
+ self, path, recursive=False, on_error="raise", batch_size=None, **kwargs
451
+ ):
452
+ paths = await self._expand_path(path, recursive=recursive)
453
+ coros = [self._cat_file(path, **kwargs) for path in paths]
454
+ batch_size = batch_size or self.batch_size
455
+ out = await _run_coros_in_chunks(
456
+ coros, batch_size=batch_size, nofiles=True, return_exceptions=True
457
+ )
458
+ if on_error == "raise":
459
+ ex = next(filter(is_exception, out), False)
460
+ if ex:
461
+ raise ex
462
+ if (
463
+ len(paths) > 1
464
+ or isinstance(path, list)
465
+ or paths[0] != self._strip_protocol(path)
466
+ ):
467
+ return {
468
+ k: v
469
+ for k, v in zip(paths, out)
470
+ if on_error != "omit" or not is_exception(v)
471
+ }
472
+ else:
473
+ return out[0]
474
+
475
+ async def _cat_ranges(
476
+ self,
477
+ paths,
478
+ starts,
479
+ ends,
480
+ max_gap=None,
481
+ batch_size=None,
482
+ on_error="return",
483
+ **kwargs,
484
+ ):
485
+ """Get the contents of byte ranges from one or more files
486
+
487
+ Parameters
488
+ ----------
489
+ paths: list
490
+ A list of of filepaths on this filesystems
491
+ starts, ends: int or list
492
+ Bytes limits of the read. If using a single int, the same value will be
493
+ used to read all the specified files.
494
+ """
495
+ # TODO: on_error
496
+ if max_gap is not None:
497
+ # use utils.merge_offset_ranges
498
+ raise NotImplementedError
499
+ if not isinstance(paths, list):
500
+ raise TypeError
501
+ if not isinstance(starts, Iterable):
502
+ starts = [starts] * len(paths)
503
+ if not isinstance(ends, Iterable):
504
+ ends = [ends] * len(paths)
505
+ if len(starts) != len(paths) or len(ends) != len(paths):
506
+ raise ValueError
507
+ coros = [
508
+ self._cat_file(p, start=s, end=e, **kwargs)
509
+ for p, s, e in zip(paths, starts, ends)
510
+ ]
511
+ batch_size = batch_size or self.batch_size
512
+ return await _run_coros_in_chunks(
513
+ coros, batch_size=batch_size, nofiles=True, return_exceptions=True
514
+ )
515
+
516
+ async def _put_file(self, lpath, rpath, **kwargs):
517
+ raise NotImplementedError
518
+
519
+ async def _put(
520
+ self,
521
+ lpath,
522
+ rpath,
523
+ recursive=False,
524
+ callback=DEFAULT_CALLBACK,
525
+ batch_size=None,
526
+ maxdepth=None,
527
+ **kwargs,
528
+ ):
529
+ """Copy file(s) from local.
530
+
531
+ Copies a specific file or tree of files (if recursive=True). If rpath
532
+ ends with a "/", it will be assumed to be a directory, and target files
533
+ will go within.
534
+
535
+ The put_file method will be called concurrently on a batch of files. The
536
+ batch_size option can configure the amount of futures that can be executed
537
+ at the same time. If it is -1, then all the files will be uploaded concurrently.
538
+ The default can be set for this instance by passing "batch_size" in the
539
+ constructor, or for all instances by setting the "gather_batch_size" key
540
+ in ``fsspec.config.conf``, falling back to 1/8th of the system limit .
541
+ """
542
+ if isinstance(lpath, list) and isinstance(rpath, list):
543
+ # No need to expand paths when both source and destination
544
+ # are provided as lists
545
+ rpaths = rpath
546
+ lpaths = lpath
547
+ else:
548
+ source_is_str = isinstance(lpath, str)
549
+ if source_is_str:
550
+ lpath = make_path_posix(lpath)
551
+ fs = LocalFileSystem()
552
+ lpaths = fs.expand_path(lpath, recursive=recursive, maxdepth=maxdepth)
553
+ if source_is_str and (not recursive or maxdepth is not None):
554
+ # Non-recursive glob does not copy directories
555
+ lpaths = [p for p in lpaths if not (trailing_sep(p) or fs.isdir(p))]
556
+ if not lpaths:
557
+ return
558
+
559
+ source_is_file = len(lpaths) == 1
560
+ dest_is_dir = isinstance(rpath, str) and (
561
+ trailing_sep(rpath) or await self._isdir(rpath)
562
+ )
563
+
564
+ rpath = self._strip_protocol(rpath)
565
+ exists = source_is_str and (
566
+ (has_magic(lpath) and source_is_file)
567
+ or (not has_magic(lpath) and dest_is_dir and not trailing_sep(lpath))
568
+ )
569
+ rpaths = other_paths(
570
+ lpaths,
571
+ rpath,
572
+ exists=exists,
573
+ flatten=not source_is_str,
574
+ )
575
+
576
+ is_dir = {l: os.path.isdir(l) for l in lpaths}
577
+ rdirs = [r for l, r in zip(lpaths, rpaths) if is_dir[l]]
578
+ file_pairs = [(l, r) for l, r in zip(lpaths, rpaths) if not is_dir[l]]
579
+
580
+ await asyncio.gather(*[self._makedirs(d, exist_ok=True) for d in rdirs])
581
+ batch_size = batch_size or self.batch_size
582
+
583
+ coros = []
584
+ callback.set_size(len(file_pairs))
585
+ for lfile, rfile in file_pairs:
586
+ put_file = callback.branch_coro(self._put_file)
587
+ coros.append(put_file(lfile, rfile, **kwargs))
588
+
589
+ return await _run_coros_in_chunks(
590
+ coros, batch_size=batch_size, callback=callback
591
+ )
592
+
593
+ async def _get_file(self, rpath, lpath, **kwargs):
594
+ raise NotImplementedError
595
+
596
+ async def _get(
597
+ self,
598
+ rpath,
599
+ lpath,
600
+ recursive=False,
601
+ callback=DEFAULT_CALLBACK,
602
+ maxdepth=None,
603
+ **kwargs,
604
+ ):
605
+ """Copy file(s) to local.
606
+
607
+ Copies a specific file or tree of files (if recursive=True). If lpath
608
+ ends with a "/", it will be assumed to be a directory, and target files
609
+ will go within. Can submit a list of paths, which may be glob-patterns
610
+ and will be expanded.
611
+
612
+ The get_file method will be called concurrently on a batch of files. The
613
+ batch_size option can configure the amount of futures that can be executed
614
+ at the same time. If it is -1, then all the files will be uploaded concurrently.
615
+ The default can be set for this instance by passing "batch_size" in the
616
+ constructor, or for all instances by setting the "gather_batch_size" key
617
+ in ``fsspec.config.conf``, falling back to 1/8th of the system limit .
618
+ """
619
+ if isinstance(lpath, list) and isinstance(rpath, list):
620
+ # No need to expand paths when both source and destination
621
+ # are provided as lists
622
+ rpaths = rpath
623
+ lpaths = lpath
624
+ else:
625
+ source_is_str = isinstance(rpath, str)
626
+ # First check for rpath trailing slash as _strip_protocol removes it.
627
+ source_not_trailing_sep = source_is_str and not trailing_sep(rpath)
628
+ rpath = self._strip_protocol(rpath)
629
+ rpaths = await self._expand_path(
630
+ rpath, recursive=recursive, maxdepth=maxdepth
631
+ )
632
+ if source_is_str and (not recursive or maxdepth is not None):
633
+ # Non-recursive glob does not copy directories
634
+ rpaths = [
635
+ p for p in rpaths if not (trailing_sep(p) or await self._isdir(p))
636
+ ]
637
+ if not rpaths:
638
+ return
639
+
640
+ lpath = make_path_posix(lpath)
641
+ source_is_file = len(rpaths) == 1
642
+ dest_is_dir = isinstance(lpath, str) and (
643
+ trailing_sep(lpath) or LocalFileSystem().isdir(lpath)
644
+ )
645
+
646
+ exists = source_is_str and (
647
+ (has_magic(rpath) and source_is_file)
648
+ or (not has_magic(rpath) and dest_is_dir and source_not_trailing_sep)
649
+ )
650
+ lpaths = other_paths(
651
+ rpaths,
652
+ lpath,
653
+ exists=exists,
654
+ flatten=not source_is_str,
655
+ )
656
+
657
+ [os.makedirs(os.path.dirname(lp), exist_ok=True) for lp in lpaths]
658
+ batch_size = kwargs.pop("batch_size", self.batch_size)
659
+
660
+ coros = []
661
+ callback.set_size(len(lpaths))
662
+ for lpath, rpath in zip(lpaths, rpaths):
663
+ get_file = callback.branch_coro(self._get_file)
664
+ coros.append(get_file(rpath, lpath, **kwargs))
665
+ return await _run_coros_in_chunks(
666
+ coros, batch_size=batch_size, callback=callback
667
+ )
668
+
669
+ async def _isfile(self, path):
670
+ try:
671
+ return (await self._info(path))["type"] == "file"
672
+ except: # noqa: E722
673
+ return False
674
+
675
+ async def _isdir(self, path):
676
+ try:
677
+ return (await self._info(path))["type"] == "directory"
678
+ except OSError:
679
+ return False
680
+
681
+ async def _size(self, path):
682
+ return (await self._info(path)).get("size", None)
683
+
684
+ async def _sizes(self, paths, batch_size=None):
685
+ batch_size = batch_size or self.batch_size
686
+ return await _run_coros_in_chunks(
687
+ [self._size(p) for p in paths], batch_size=batch_size
688
+ )
689
+
690
+ async def _exists(self, path, **kwargs):
691
+ try:
692
+ await self._info(path, **kwargs)
693
+ return True
694
+ except FileNotFoundError:
695
+ return False
696
+
697
+ async def _info(self, path, **kwargs):
698
+ raise NotImplementedError
699
+
700
+ async def _ls(self, path, detail=True, **kwargs):
701
+ raise NotImplementedError
702
+
703
+ async def _walk(self, path, maxdepth=None, on_error="omit", **kwargs):
704
+ if maxdepth is not None and maxdepth < 1:
705
+ raise ValueError("maxdepth must be at least 1")
706
+
707
+ path = self._strip_protocol(path)
708
+ full_dirs = {}
709
+ dirs = {}
710
+ files = {}
711
+
712
+ detail = kwargs.pop("detail", False)
713
+ try:
714
+ listing = await self._ls(path, detail=True, **kwargs)
715
+ except (FileNotFoundError, OSError) as e:
716
+ if on_error == "raise":
717
+ raise
718
+ elif callable(on_error):
719
+ on_error(e)
720
+ if detail:
721
+ yield path, {}, {}
722
+ else:
723
+ yield path, [], []
724
+ return
725
+
726
+ for info in listing:
727
+ # each info name must be at least [path]/part , but here
728
+ # we check also for names like [path]/part/
729
+ pathname = info["name"].rstrip("/")
730
+ name = pathname.rsplit("/", 1)[-1]
731
+ if info["type"] == "directory" and pathname != path:
732
+ # do not include "self" path
733
+ full_dirs[name] = pathname
734
+ dirs[name] = info
735
+ elif pathname == path:
736
+ # file-like with same name as give path
737
+ files[""] = info
738
+ else:
739
+ files[name] = info
740
+
741
+ if detail:
742
+ yield path, dirs, files
743
+ else:
744
+ yield path, list(dirs), list(files)
745
+
746
+ if maxdepth is not None:
747
+ maxdepth -= 1
748
+ if maxdepth < 1:
749
+ return
750
+
751
+ for d in dirs:
752
+ async for _ in self._walk(
753
+ full_dirs[d], maxdepth=maxdepth, detail=detail, **kwargs
754
+ ):
755
+ yield _
756
+
757
+ async def _glob(self, path, maxdepth=None, **kwargs):
758
+ if maxdepth is not None and maxdepth < 1:
759
+ raise ValueError("maxdepth must be at least 1")
760
+
761
+ import re
762
+
763
+ seps = (os.path.sep, os.path.altsep) if os.path.altsep else (os.path.sep,)
764
+ ends_with_sep = path.endswith(seps) # _strip_protocol strips trailing slash
765
+ path = self._strip_protocol(path)
766
+ append_slash_to_dirname = ends_with_sep or path.endswith(
767
+ tuple(sep + "**" for sep in seps)
768
+ )
769
+ idx_star = path.find("*") if path.find("*") >= 0 else len(path)
770
+ idx_qmark = path.find("?") if path.find("?") >= 0 else len(path)
771
+ idx_brace = path.find("[") if path.find("[") >= 0 else len(path)
772
+
773
+ min_idx = min(idx_star, idx_qmark, idx_brace)
774
+
775
+ detail = kwargs.pop("detail", False)
776
+
777
+ if not has_magic(path):
778
+ if await self._exists(path, **kwargs):
779
+ if not detail:
780
+ return [path]
781
+ else:
782
+ return {path: await self._info(path, **kwargs)}
783
+ else:
784
+ if not detail:
785
+ return [] # glob of non-existent returns empty
786
+ else:
787
+ return {}
788
+ elif "/" in path[:min_idx]:
789
+ min_idx = path[:min_idx].rindex("/")
790
+ root = path[: min_idx + 1]
791
+ depth = path[min_idx + 1 :].count("/") + 1
792
+ else:
793
+ root = ""
794
+ depth = path[min_idx + 1 :].count("/") + 1
795
+
796
+ if "**" in path:
797
+ if maxdepth is not None:
798
+ idx_double_stars = path.find("**")
799
+ depth_double_stars = path[idx_double_stars:].count("/") + 1
800
+ depth = depth - depth_double_stars + maxdepth
801
+ else:
802
+ depth = None
803
+
804
+ allpaths = await self._find(
805
+ root, maxdepth=depth, withdirs=True, detail=True, **kwargs
806
+ )
807
+
808
+ pattern = glob_translate(path + ("/" if ends_with_sep else ""))
809
+ pattern = re.compile(pattern)
810
+
811
+ out = {
812
+ p: info
813
+ for p, info in sorted(allpaths.items())
814
+ if pattern.match(
815
+ (
816
+ p + "/"
817
+ if append_slash_to_dirname and info["type"] == "directory"
818
+ else p
819
+ )
820
+ )
821
+ }
822
+
823
+ if detail:
824
+ return out
825
+ else:
826
+ return list(out)
827
+
828
+ async def _du(self, path, total=True, maxdepth=None, **kwargs):
829
+ sizes = {}
830
+ # async for?
831
+ for f in await self._find(path, maxdepth=maxdepth, **kwargs):
832
+ info = await self._info(f)
833
+ sizes[info["name"]] = info["size"]
834
+ if total:
835
+ return sum(sizes.values())
836
+ else:
837
+ return sizes
838
+
839
+ async def _find(self, path, maxdepth=None, withdirs=False, **kwargs):
840
+ path = self._strip_protocol(path)
841
+ out = {}
842
+ detail = kwargs.pop("detail", False)
843
+
844
+ # Add the root directory if withdirs is requested
845
+ # This is needed for posix glob compliance
846
+ if withdirs and path != "" and await self._isdir(path):
847
+ out[path] = await self._info(path)
848
+
849
+ # async for?
850
+ async for _, dirs, files in self._walk(path, maxdepth, detail=True, **kwargs):
851
+ if withdirs:
852
+ files.update(dirs)
853
+ out.update({info["name"]: info for name, info in files.items()})
854
+ if not out and (await self._isfile(path)):
855
+ # walk works on directories, but find should also return [path]
856
+ # when path happens to be a file
857
+ out[path] = {}
858
+ names = sorted(out)
859
+ if not detail:
860
+ return names
861
+ else:
862
+ return {name: out[name] for name in names}
863
+
864
+ async def _expand_path(self, path, recursive=False, maxdepth=None):
865
+ if maxdepth is not None and maxdepth < 1:
866
+ raise ValueError("maxdepth must be at least 1")
867
+
868
+ if isinstance(path, str):
869
+ out = await self._expand_path([path], recursive, maxdepth)
870
+ else:
871
+ out = set()
872
+ path = [self._strip_protocol(p) for p in path]
873
+ for p in path: # can gather here
874
+ if has_magic(p):
875
+ bit = set(await self._glob(p, maxdepth=maxdepth))
876
+ out |= bit
877
+ if recursive:
878
+ # glob call above expanded one depth so if maxdepth is defined
879
+ # then decrement it in expand_path call below. If it is zero
880
+ # after decrementing then avoid expand_path call.
881
+ if maxdepth is not None and maxdepth <= 1:
882
+ continue
883
+ out |= set(
884
+ await self._expand_path(
885
+ list(bit),
886
+ recursive=recursive,
887
+ maxdepth=maxdepth - 1 if maxdepth is not None else None,
888
+ )
889
+ )
890
+ continue
891
+ elif recursive:
892
+ rec = set(await self._find(p, maxdepth=maxdepth, withdirs=True))
893
+ out |= rec
894
+ if p not in out and (recursive is False or (await self._exists(p))):
895
+ # should only check once, for the root
896
+ out.add(p)
897
+ if not out:
898
+ raise FileNotFoundError(path)
899
+ return sorted(out)
900
+
901
+ async def _mkdir(self, path, create_parents=True, **kwargs):
902
+ pass # not necessary to implement, may not have directories
903
+
904
+ async def _makedirs(self, path, exist_ok=False):
905
+ pass # not necessary to implement, may not have directories
906
+
907
+ async def open_async(self, path, mode="rb", **kwargs):
908
+ if "b" not in mode or kwargs.get("compression"):
909
+ raise ValueError
910
+ raise NotImplementedError
911
+
912
+
913
def mirror_sync_methods(obj):
    """Populate sync and async methods for obj

    For every candidate coroutine ``_name`` (drawn from ``async_methods``
    plus the defaults on ``AsyncFileSystem``), install a blocking ``name``
    wrapper on *obj* — but only where the subclass has not already
    overridden the sync version inherited from ``AbstractFileSystem``.

    Uses the methods specified in
    - async_methods: the set that an implementation is expected to provide
    - default_async_methods: that can be derived from their sync version in
      AbstractFileSystem
    - AsyncFileSystem: async-specific default coroutines
    """
    from fsspec import AbstractFileSystem

    for name in async_methods + dir(AsyncFileSystem):
        if not name.startswith("_") or not private.match(name):
            continue
        sync_name = name[1:]
        is_coro = inspect.iscoroutinefunction(getattr(obj, name, None))
        current = getattr(getattr(obj, sync_name, False), "__func__", None)
        is_default = current is getattr(AbstractFileSystem, sync_name, "")
        if not (is_coro and is_default):
            continue
        wrapper = sync_wrapper(getattr(obj, name), obj=obj)
        setattr(obj, sync_name, wrapper)
        if not wrapper.__doc__:
            wrapper.__doc__ = getattr(
                getattr(AbstractFileSystem, sync_name, None), "__doc__", ""
            )
943
+
944
+
945
class FSSpecCoroutineCancel(Exception):
    """Raised into still-running fsspec coroutines to force-cancel them."""
947
+
948
+
949
+ def _dump_running_tasks(
950
+ printout=True, cancel=True, exc=FSSpecCoroutineCancel, with_task=False
951
+ ):
952
+ import traceback
953
+
954
+ tasks = [t for t in asyncio.tasks.all_tasks(loop[0]) if not t.done()]
955
+ if printout:
956
+ [task.print_stack() for task in tasks]
957
+ out = [
958
+ {
959
+ "locals": task._coro.cr_frame.f_locals,
960
+ "file": task._coro.cr_frame.f_code.co_filename,
961
+ "firstline": task._coro.cr_frame.f_code.co_firstlineno,
962
+ "linelo": task._coro.cr_frame.f_lineno,
963
+ "stack": traceback.format_stack(task._coro.cr_frame),
964
+ "task": task if with_task else None,
965
+ }
966
+ for task in tasks
967
+ ]
968
+ if cancel:
969
+ for t in tasks:
970
+ cbs = t._callbacks
971
+ t.cancel()
972
+ asyncio.futures.Future.set_exception(t, exc)
973
+ asyncio.futures.Future.cancel(t)
974
+ [cb[0](t) for cb in cbs] # cancels any dependent concurrent.futures
975
+ try:
976
+ t._coro.throw(exc) # exits coro, unless explicitly handled
977
+ except exc:
978
+ pass
979
+ return out
980
+
981
+
982
+ class AbstractAsyncStreamedFile(AbstractBufferedFile):
983
+ # no read buffering, and always auto-commit
984
+ # TODO: readahead might still be useful here, but needs async version
985
+
986
+ async def read(self, length=-1):
987
+ """
988
+ Return data from cache, or fetch pieces as necessary
989
+
990
+ Parameters
991
+ ----------
992
+ length: int (-1)
993
+ Number of bytes to read; if <0, all remaining bytes.
994
+ """
995
+ length = -1 if length is None else int(length)
996
+ if self.mode != "rb":
997
+ raise ValueError("File not in read mode")
998
+ if length < 0:
999
+ length = self.size - self.loc
1000
+ if self.closed:
1001
+ raise ValueError("I/O operation on closed file.")
1002
+ if length == 0:
1003
+ # don't even bother calling fetch
1004
+ return b""
1005
+ out = await self._fetch_range(self.loc, self.loc + length)
1006
+ self.loc += len(out)
1007
+ return out
1008
+
1009
+ async def write(self, data):
1010
+ """
1011
+ Write data to buffer.
1012
+
1013
+ Buffer only sent on flush() or if buffer is greater than
1014
+ or equal to blocksize.
1015
+
1016
+ Parameters
1017
+ ----------
1018
+ data: bytes
1019
+ Set of bytes to be written.
1020
+ """
1021
+ if self.mode not in {"wb", "ab"}:
1022
+ raise ValueError("File not in write mode")
1023
+ if self.closed:
1024
+ raise ValueError("I/O operation on closed file.")
1025
+ if self.forced:
1026
+ raise ValueError("This file has been force-flushed, can only close")
1027
+ out = self.buffer.write(data)
1028
+ self.loc += out
1029
+ if self.buffer.tell() >= self.blocksize:
1030
+ await self.flush()
1031
+ return out
1032
+
1033
+ async def close(self):
1034
+ """Close file
1035
+
1036
+ Finalizes writes, discards cache
1037
+ """
1038
+ if getattr(self, "_unclosable", False):
1039
+ return
1040
+ if self.closed:
1041
+ return
1042
+ if self.mode == "rb":
1043
+ self.cache = None
1044
+ else:
1045
+ if not self.forced:
1046
+ await self.flush(force=True)
1047
+
1048
+ if self.fs is not None:
1049
+ self.fs.invalidate_cache(self.path)
1050
+ self.fs.invalidate_cache(self.fs._parent(self.path))
1051
+
1052
+ self.closed = True
1053
+
1054
+ async def flush(self, force=False):
1055
+ if self.closed:
1056
+ raise ValueError("Flush on closed file")
1057
+ if force and self.forced:
1058
+ raise ValueError("Force flush cannot be called more than once")
1059
+ if force:
1060
+ self.forced = True
1061
+
1062
+ if self.mode not in {"wb", "ab"}:
1063
+ # no-op to flush on read-mode
1064
+ return
1065
+
1066
+ if not force and self.buffer.tell() < self.blocksize:
1067
+ # Defer write on small block
1068
+ return
1069
+
1070
+ if self.offset is None:
1071
+ # Initialize a multipart upload
1072
+ self.offset = 0
1073
+ try:
1074
+ await self._initiate_upload()
1075
+ except: # noqa: E722
1076
+ self.closed = True
1077
+ raise
1078
+
1079
+ if await self._upload_chunk(final=force) is not False:
1080
+ self.offset += self.buffer.seek(0, 2)
1081
+ self.buffer = io.BytesIO()
1082
+
1083
+ async def __aenter__(self):
1084
+ return self
1085
+
1086
+ async def __aexit__(self, exc_type, exc_val, exc_tb):
1087
+ await self.close()
1088
+
1089
+ async def _fetch_range(self, start, end):
1090
+ raise NotImplementedError
1091
+
1092
+ async def _initiate_upload(self):
1093
+ pass
1094
+
1095
+ async def _upload_chunk(self, final=False):
1096
+ raise NotImplementedError
parrot/lib/python3.10/site-packages/fsspec/caching.py ADDED
@@ -0,0 +1,951 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import collections
4
+ import functools
5
+ import logging
6
+ import math
7
+ import os
8
+ import threading
9
+ import warnings
10
+ from concurrent.futures import Future, ThreadPoolExecutor
11
+ from typing import (
12
+ TYPE_CHECKING,
13
+ Any,
14
+ Callable,
15
+ ClassVar,
16
+ Generic,
17
+ NamedTuple,
18
+ Optional,
19
+ OrderedDict,
20
+ TypeVar,
21
+ )
22
+
23
+ if TYPE_CHECKING:
24
+ import mmap
25
+
26
+ from typing_extensions import ParamSpec
27
+
28
+ P = ParamSpec("P")
29
+ else:
30
+ P = TypeVar("P")
31
+
32
+ T = TypeVar("T")
33
+
34
+
35
+ logger = logging.getLogger("fsspec")
36
+
37
+ Fetcher = Callable[[int, int], bytes] # Maps (start, end) to bytes
38
+
39
+
40
class BaseCache:
    """No-op cache: keeps nothing and forwards every request to the fetcher.

    Serves as the common base class for the real cache implementations,
    providing the shared bookkeeping (hit/miss counters and the running
    total of bytes actually requested from the backend).

    Parameters
    ----------
    blocksize: int
        Read-ahead granularity in bytes.
    fetcher: func
        Callable ``f(start, end)`` returning the bytes in that range.
    size: int
        Total size of the underlying file.
    """

    name: ClassVar[str] = "none"

    def __init__(self, blocksize: int, fetcher: Fetcher, size: int) -> None:
        self.blocksize = blocksize
        self.nblocks = 0
        self.fetcher = fetcher
        self.size = size
        self.hit_count = 0
        self.miss_count = 0
        # the bytes that we actually requested
        self.total_requested_bytes = 0

    def _fetch(self, start: int | None, stop: int | None) -> bytes:
        # None bounds mean "start of file" / "end of file"
        begin = 0 if start is None else start
        finish = self.size if stop is None else stop
        if begin >= self.size or begin >= finish:
            return b""
        return self.fetcher(begin, finish)

    def _reset_stats(self) -> None:
        """Reset hit and miss counts for a more ganular report e.g. by file."""
        self.hit_count = 0
        self.miss_count = 0
        self.total_requested_bytes = 0

    def _log_stats(self) -> str:
        """Return a formatted string of the cache statistics."""
        if not (self.hit_count or self.miss_count):
            # a cache that does nothing, this is for logs only
            return ""
        return " , %s: %d hits, %d misses, %d total requested bytes" % (
            self.name,
            self.hit_count,
            self.miss_count,
            self.total_requested_bytes,
        )

    def __repr__(self) -> str:
        # TODO: use rich for better formatting
        return f"""
        <{self.__class__.__name__}:
            block size  :   {self.blocksize}
            block count :   {self.nblocks}
            file size   :   {self.size}
            cache hits  :   {self.hit_count}
            cache misses:   {self.miss_count}
            total requested bytes: {self.total_requested_bytes}>
        """
106
+
107
+
108
class MMapCache(BaseCache):
    """Sparse on-disk cache backed by a memory-mapped temporary file.

    Blocks are copied into the mmap only when first requested, so just the
    parts of the file actually read consume space. Ensure the temporary
    location has enough free disk.

    This cache method might only work on posix
    """

    name = "mmap"

    def __init__(
        self,
        blocksize: int,
        fetcher: Fetcher,
        size: int,
        location: str | None = None,
        blocks: set[int] | None = None,
    ) -> None:
        super().__init__(blocksize, fetcher, size)
        self.blocks = blocks if blocks is not None else set()
        self.location = location
        self.cache = self._makefile()

    def _makefile(self) -> mmap.mmap | bytearray:
        import mmap
        import tempfile

        if self.size == 0:
            # cannot mmap an empty file; an empty buffer behaves the same
            return bytearray()

        # posix version
        if self.location is not None and os.path.exists(self.location):
            # reuse an existing cache file; self.blocks says what is valid
            handle = open(self.location, "r+b")
        else:
            if self.location is None:
                handle = tempfile.TemporaryFile()
                self.blocks = set()
            else:
                handle = open(self.location, "wb+")
            # extend the file to its full length so it can be mapped
            handle.seek(self.size - 1)
            handle.write(b"1")
            handle.flush()

        return mmap.mmap(handle.fileno(), self.size)

    def _fetch(self, start: int | None, end: int | None) -> bytes:
        logger.debug(f"MMap cache fetching {start}-{end}")
        if start is None:
            start = 0
        if end is None:
            end = self.size
        if start >= self.size or start >= end:
            return b""
        first = start // self.blocksize
        last = end // self.blocksize
        wanted = range(first, last + 1)
        missing = [b for b in wanted if b not in self.blocks]
        self.hit_count += sum(1 for b in wanted if b in self.blocks)
        self.miss_count += len(missing)
        while missing:
            # TODO: not a for loop so we can consolidate blocks later to
            # make fewer fetch calls; this could be parallel
            block = missing.pop(0)

            begin = block * self.blocksize
            finish = min(begin + self.blocksize, self.size)
            self.total_requested_bytes += finish - begin
            logger.debug(f"MMap get block #{block} ({begin}-{finish})")
            self.cache[begin:finish] = self.fetcher(begin, finish)
            self.blocks.add(block)

        return self.cache[start:end]

    def __getstate__(self) -> dict[str, Any]:
        state = self.__dict__.copy()
        # the mmap object cannot be pickled; it is rebuilt on unpickle
        state.pop("cache")
        return state

    def __setstate__(self, state: dict[str, Any]) -> None:
        # Restore attributes, then re-open/re-map the backing file
        self.__dict__.update(state)
        self.cache = self._makefile()
192
+
193
+
194
class ReadAheadCache(BaseCache):
    """Forward-only cache holding a single contiguous window.

    Much simpler than BytesCache: no hole-filling and no retained
    fragments. Best suited to many small sequential reads (e.g., reading
    lines from a file).
    """

    name = "readahead"

    def __init__(self, blocksize: int, fetcher: Fetcher, size: int) -> None:
        super().__init__(blocksize, fetcher, size)
        self.cache = b""
        self.start = 0
        self.end = 0

    def _fetch(self, start: int | None, end: int | None) -> bytes:
        if start is None:
            start = 0
        if end is None or end > self.size:
            end = self.size
        if start >= self.size or start >= end:
            return b""
        remaining = end - start
        if self.start <= start and end <= self.end:
            # fully inside the cached window
            self.hit_count += 1
            return self.cache[start - self.start : end - self.start]
        if self.start <= start < self.end:
            # head of the request is cached; fetch only the tail below
            self.miss_count += 1
            head = self.cache[start - self.start :]
            remaining -= len(head)
            start = self.end
        else:
            # nothing useful cached
            self.miss_count += 1
            head = b""
        end = min(self.size, end + self.blocksize)
        self.total_requested_bytes += end - start
        self.cache = self.fetcher(start, end)  # new block replaces old
        self.start = start
        self.end = self.start + len(self.cache)
        return head + self.cache[:remaining]
238
+
239
+
240
class FirstChunkCache(BaseCache):
    """Caches the first block of a file only

    This may be useful for file types where the metadata is stored in the header,
    but is randomly accessed.
    """

    name = "first"

    def __init__(self, blocksize: int, fetcher: Fetcher, size: int) -> None:
        if blocksize > size:
            # this will buffer the whole thing
            blocksize = size
        super().__init__(blocksize, fetcher, size)
        self.cache: bytes | None = None  # header bytes, filled lazily

    def _fetch(self, start: int | None, end: int | None) -> bytes:
        """Return bytes ``[start, end)``, serving the header from cache.

        ``start=None`` means start-of-file; ``end=None`` means end-of-file,
        consistent with the other cache classes. (Previously ``end=None``
        raised ``TypeError`` from ``min(None, self.size)``.)
        """
        start = start or 0
        if start > self.size:
            logger.debug("FirstChunkCache: requested start > file size")
            return b""

        # Fix: normalise end=None to "end of file" before clamping
        end = self.size if end is None else min(end, self.size)

        if start < self.blocksize:
            if self.cache is None:
                self.miss_count += 1
                if end > self.blocksize:
                    # one fetch covers both the header and the tail
                    self.total_requested_bytes += end
                    data = self.fetcher(0, end)
                    self.cache = data[: self.blocksize]
                    return data[start:]
                self.cache = self.fetcher(0, self.blocksize)
                self.total_requested_bytes += self.blocksize
            part = self.cache[start:end]
            if end > self.blocksize:
                # bytes beyond the cached header must be fetched each time
                self.total_requested_bytes += end - self.blocksize
                part += self.fetcher(self.blocksize, end)
            self.hit_count += 1
            return part
        else:
            # request lies entirely past the header: pass through
            self.miss_count += 1
            self.total_requested_bytes += end - start
            return self.fetcher(start, end)
284
+
285
+
286
class BlockCache(BaseCache):
    """
    Cache holding memory as a set of blocks.

    Requests are only ever made ``blocksize`` at a time, and are
    stored in an LRU cache. The least recently accessed block is
    discarded when more than ``maxblocks`` are stored.

    Parameters
    ----------
    blocksize : int
        The number of bytes to store in each block.
        Requests are only ever made for ``blocksize``, so this
        should balance the overhead of making a request against
        the granularity of the blocks.
    fetcher : Callable
    size : int
        The total size of the file being cached.
    maxblocks : int
        The maximum number of blocks to cache for. The maximum memory
        use for this cache is then ``blocksize * maxblocks``.
    """

    name = "blockcache"

    def __init__(
        self, blocksize: int, fetcher: Fetcher, size: int, maxblocks: int = 32
    ) -> None:
        super().__init__(blocksize, fetcher, size)
        self.nblocks = math.ceil(size / blocksize)
        self.maxblocks = maxblocks
        self._fetch_block_cached = functools.lru_cache(maxblocks)(self._fetch_block)

    def cache_info(self):
        """
        The statistics on the block cache.

        Returns
        -------
        NamedTuple
            Returned directly from the LRU Cache used internally.
        """
        return self._fetch_block_cached.cache_info()

    def __getstate__(self) -> dict[str, Any]:
        # BUGFIX: operate on a *copy* of __dict__.  The previous code did
        # ``state = self.__dict__`` and then deleted the lru-wrapped fetcher
        # from it, which stripped the attribute from the live instance and
        # broke it after any pickling attempt.  MMapCache already does this
        # correctly with ``.copy()``.
        state = self.__dict__.copy()
        del state["_fetch_block_cached"]
        return state

    def __setstate__(self, state: dict[str, Any]) -> None:
        # Rebuild the unpicklable LRU-wrapped fetcher from ``maxblocks``.
        self.__dict__.update(state)
        self._fetch_block_cached = functools.lru_cache(state["maxblocks"])(
            self._fetch_block
        )

    def _fetch(self, start: int | None, end: int | None) -> bytes:
        """Return bytes [start, end), pulling whole blocks through the LRU."""
        if start is None:
            start = 0
        if end is None:
            end = self.size
        if start >= self.size or start >= end:
            return b""

        # byte position -> block numbers
        start_block_number = start // self.blocksize
        end_block_number = end // self.blocksize

        # these are cached, so safe to do multiple calls for the same start and end.
        for block_number in range(start_block_number, end_block_number + 1):
            self._fetch_block_cached(block_number)

        return self._read_cache(
            start,
            end,
            start_block_number=start_block_number,
            end_block_number=end_block_number,
        )

    def _fetch_block(self, block_number: int) -> bytes:
        """
        Fetch the block of data for `block_number`.
        """
        if block_number > self.nblocks:
            raise ValueError(
                f"'block_number={block_number}' is greater than "
                f"the number of blocks ({self.nblocks})"
            )

        start = block_number * self.blocksize
        end = start + self.blocksize
        self.total_requested_bytes += end - start
        self.miss_count += 1
        logger.info("BlockCache fetching block %d", block_number)
        block_contents = super()._fetch(start, end)
        return block_contents

    def _read_cache(
        self, start: int, end: int, start_block_number: int, end_block_number: int
    ) -> bytes:
        """
        Read from our block cache.

        Parameters
        ----------
        start, end : int
            The start and end byte positions.
        start_block_number, end_block_number : int
            The start and end block numbers.
        """
        start_pos = start % self.blocksize
        end_pos = end % self.blocksize

        self.hit_count += 1
        if start_block_number == end_block_number:
            block: bytes = self._fetch_block_cached(start_block_number)
            return block[start_pos:end_pos]

        else:
            # read from the initial
            out = [self._fetch_block_cached(start_block_number)[start_pos:]]

            # intermediate blocks
            # Note: it'd be nice to combine these into one big request. However
            # that doesn't play nicely with our LRU cache.
            out.extend(
                map(
                    self._fetch_block_cached,
                    range(start_block_number + 1, end_block_number),
                )
            )

            # final block (end_pos == 0 yields b"" here, which is correct
            # when ``end`` falls exactly on a block boundary)
            out.append(self._fetch_block_cached(end_block_number)[:end_pos])

            return b"".join(out)
421
+
422
+
423
class BytesCache(BaseCache):
    """Cache which holds data in a in-memory bytes object

    Implements read-ahead by the block size, for semi-random reads progressing
    through the file.

    Parameters
    ----------
    trim: bool
        As we read more data, whether to discard the start of the buffer when
        we are more than a blocksize ahead of it.
    """

    name: ClassVar[str] = "bytes"

    def __init__(
        self, blocksize: int, fetcher: Fetcher, size: int, trim: bool = True
    ) -> None:
        super().__init__(blocksize, fetcher, size)
        # Buffer holds file bytes [self.start, self.end); both None before
        # the first fetch.
        self.cache = b""
        self.start: int | None = None
        self.end: int | None = None
        self.trim = trim

    def _fetch(self, start: int | None, end: int | None) -> bytes:
        # TODO: only set start/end after fetch, in case it fails?
        # is this where retry logic might go?
        if start is None:
            start = 0
        if end is None:
            end = self.size
        if start >= self.size or start >= end:
            return b""
        if (
            self.start is not None
            and start >= self.start
            and self.end is not None
            and end < self.end
        ):
            # cache hit: we have all the required data
            offset = start - self.start
            self.hit_count += 1
            return self.cache[offset : offset + end - start]

        # bend: where the actual fetch should stop, including read-ahead.
        if self.blocksize:
            bend = min(self.size, end + self.blocksize)
        else:
            bend = end

        if bend == start or start > self.size:
            return b""

        if (self.start is None or start < self.start) and (
            self.end is None or end > self.end
        ):
            # First read, or extending both before and after
            self.total_requested_bytes += bend - start
            self.miss_count += 1
            self.cache = self.fetcher(start, bend)
            self.start = start
        else:
            assert self.start is not None
            assert self.end is not None
            self.miss_count += 1

            if start < self.start:
                # Request begins before the buffer.
                if self.end is None or self.end - end > self.blocksize:
                    # Gap too big to be worth keeping the old data: refetch.
                    self.total_requested_bytes += bend - start
                    self.cache = self.fetcher(start, bend)
                    self.start = start
                else:
                    # Prepend only the missing head to the existing buffer.
                    self.total_requested_bytes += self.start - start
                    new = self.fetcher(start, self.start)
                    self.start = start
                    self.cache = new + self.cache
            elif self.end is not None and bend > self.end:
                # Request extends past the buffer's tail.
                if self.end > self.size:
                    # Already buffered to EOF; nothing more to read.
                    pass
                elif end - self.end > self.blocksize:
                    # Too far ahead to extend incrementally: refetch.
                    self.total_requested_bytes += bend - start
                    self.cache = self.fetcher(start, bend)
                    self.start = start
                else:
                    # Append only the missing tail to the existing buffer.
                    self.total_requested_bytes += bend - self.end
                    new = self.fetcher(self.end, bend)
                    self.cache = self.cache + new

        self.end = self.start + len(self.cache)
        offset = start - self.start
        out = self.cache[offset : offset + end - start]
        if self.trim:
            # Drop whole blocks from the front once the buffer has grown
            # beyond roughly one blocksize past the current read position.
            num = (self.end - self.start) // (self.blocksize + 1)
            if num > 1:
                self.start += self.blocksize * num
                self.cache = self.cache[self.blocksize * num :]
        return out

    def __len__(self) -> int:
        # Number of bytes currently buffered (not the file size).
        return len(self.cache)
522
+
523
+
524
class AllBytes(BaseCache):
    """Cache entire contents of the file"""

    name: ClassVar[str] = "all"

    def __init__(
        self,
        blocksize: int | None = None,
        fetcher: Fetcher | None = None,
        size: int | None = None,
        data: bytes | None = None,
    ) -> None:
        super().__init__(blocksize, fetcher, size)  # type: ignore[arg-type]
        if data is not None:
            # Content was supplied up front; nothing to fetch.
            self.data = data
        else:
            # Pull the whole file in a single request and keep it resident.
            self.miss_count += 1
            self.total_requested_bytes += self.size
            self.data = self.fetcher(0, self.size)

    def _fetch(self, start: int | None, stop: int | None) -> bytes:
        # Everything is already in memory, so every read is a hit; plain
        # slicing copes with None bounds naturally.
        self.hit_count += 1
        return self.data[start:stop]
546
+
547
+
548
class KnownPartsOfAFile(BaseCache):
    """
    Cache holding known file parts.

    Parameters
    ----------
    blocksize: int
        How far to read ahead in numbers of bytes
    fetcher: func
        Function of the form f(start, end) which gets bytes from remote as
        specified
    size: int
        How big this file is
    data: dict
        A dictionary mapping explicit `(start, stop)` file-offset tuples
        with known bytes.
    strict: bool, default True
        Whether to fetch reads that go beyond a known byte-range boundary.
        If `False`, any read that ends outside a known part will be zero
        padded. Note that zero padding will not be used for reads that
        begin outside a known byte-range.
    """

    name: ClassVar[str] = "parts"

    def __init__(
        self,
        blocksize: int,
        fetcher: Fetcher,
        size: int,
        data: Optional[dict[tuple[int, int], bytes]] = None,
        strict: bool = True,
        **_: Any,
    ):
        super().__init__(blocksize, fetcher, size)
        self.strict = strict

        # simple consolidation of contiguous blocks
        if data:
            old_offsets = sorted(data.keys())
            offsets = [old_offsets[0]]
            blocks = [data.pop(old_offsets[0])]
            for start, stop in old_offsets[1:]:
                start0, stop0 = offsets[-1]
                if start == stop0:
                    # Adjacent to the previous range: merge into it.
                    offsets[-1] = (start0, stop)
                    blocks[-1] += data.pop((start, stop))
                else:
                    # Gap before this range: start a new consolidated part.
                    offsets.append((start, stop))
                    blocks.append(data.pop((start, stop)))

            self.data = dict(zip(offsets, blocks))
        else:
            self.data = {}

    def _fetch(self, start: int | None, stop: int | None) -> bytes:
        # Serve [start, stop) from the known parts; fall back to the
        # fetcher (with a warning) only for bytes outside every part.
        if start is None:
            start = 0
        if stop is None:
            stop = self.size

        out = b""
        for (loc0, loc1), data in self.data.items():
            # If self.strict=False, use zero-padded data
            # for reads beyond the end of a "known" buffer
            if loc0 <= start < loc1:
                off = start - loc0
                out = data[off : off + stop - start]
                if not self.strict or loc0 <= stop <= loc1:
                    # The request is within a known range, or
                    # it begins within a known range, and we
                    # are allowed to pad reads beyond the
                    # buffer with zero
                    out += b"\x00" * (stop - start - len(out))
                    self.hit_count += 1
                    return out
                else:
                    # The request ends outside a known range,
                    # and we are being "strict" about reads
                    # beyond the buffer
                    start = loc1
                    break

        # We only get here if there is a request outside the
        # known parts of the file. In an ideal world, this
        # should never happen
        if self.fetcher is None:
            # We cannot fetch the data, so raise an error
            raise ValueError(f"Read is outside the known file parts: {(start, stop)}. ")
        # We can fetch the data, but should warn the user
        # that this may be slow
        warnings.warn(
            f"Read is outside the known file parts: {(start, stop)}. "
            f"IO/caching performance may be poor!"
        )
        logger.debug(f"KnownPartsOfAFile cache fetching {start}-{stop}")
        self.total_requested_bytes += stop - start
        self.miss_count += 1
        # ``out`` holds any prefix that was found inside a known part.
        return out + super()._fetch(start, stop)
647
+
648
+
649
class UpdatableLRU(Generic[P, T]):
    """
    Custom implementation of LRU cache that allows updating keys

    Used by BackgroudBlockCache
    """

    class CacheInfo(NamedTuple):
        hits: int
        misses: int
        maxsize: int
        currsize: int

    def __init__(self, func: Callable[P, T], max_size: int = 128) -> None:
        self._cache: OrderedDict[Any, T] = collections.OrderedDict()
        self._func = func
        self._max_size = max_size
        self._hits = 0
        self._misses = 0
        self._lock = threading.Lock()

    def _store(self, key: Any, value: T) -> None:
        # Insert or refresh an entry, evicting the least-recently-used one
        # when over capacity.  Caller must already hold ``self._lock``.
        self._cache[key] = value
        if len(self._cache) > self._max_size:
            self._cache.popitem(last=False)

    def __call__(self, *args: P.args, **kwargs: P.kwargs) -> T:
        if kwargs:
            raise TypeError(f"Got unexpected keyword argument {kwargs.keys()}")
        with self._lock:
            try:
                # move_to_end raises KeyError when absent, doubling as the
                # membership test (EAFP).
                self._cache.move_to_end(args)
            except KeyError:
                pass
            else:
                self._hits += 1
                return self._cache[args]

        # Computed outside the lock so a slow underlying fetch does not
        # serialise unrelated callers.
        value = self._func(*args, **kwargs)

        with self._lock:
            self._misses += 1
            self._store(args, value)

        return value

    def is_key_cached(self, *args: Any) -> bool:
        with self._lock:
            return args in self._cache

    def add_key(self, result: T, *args: Any) -> None:
        # Insert a precomputed value without counting a hit or a miss.
        with self._lock:
            self._store(args, result)

    def cache_info(self) -> UpdatableLRU.CacheInfo:
        with self._lock:
            return self.CacheInfo(
                maxsize=self._max_size,
                currsize=len(self._cache),
                hits=self._hits,
                misses=self._misses,
            )
707
+
708
+
709
class BackgroundBlockCache(BaseCache):
    """
    Cache holding memory as a set of blocks with pre-loading of
    the next block in the background.

    Requests are only ever made ``blocksize`` at a time, and are
    stored in an LRU cache. The least recently accessed block is
    discarded when more than ``maxblocks`` are stored. If the
    next block is not in cache, it is loaded in a separate thread
    in non-blocking way.

    Parameters
    ----------
    blocksize : int
        The number of bytes to store in each block.
        Requests are only ever made for ``blocksize``, so this
        should balance the overhead of making a request against
        the granularity of the blocks.
    fetcher : Callable
    size : int
        The total size of the file being cached.
    maxblocks : int
        The maximum number of blocks to cache for. The maximum memory
        use for this cache is then ``blocksize * maxblocks``.
    """

    name: ClassVar[str] = "background"

    def __init__(
        self, blocksize: int, fetcher: Fetcher, size: int, maxblocks: int = 32
    ) -> None:
        super().__init__(blocksize, fetcher, size)
        self.nblocks = math.ceil(size / blocksize)
        self.maxblocks = maxblocks
        self._fetch_block_cached = UpdatableLRU(self._fetch_block, maxblocks)

        # One worker for the single in-flight speculative fetch; the lock
        # guards the (_fetch_future, _fetch_future_block_number) pair.
        self._thread_executor = ThreadPoolExecutor(max_workers=1)
        self._fetch_future_block_number: int | None = None
        self._fetch_future: Future[bytes] | None = None
        self._fetch_future_lock = threading.Lock()

    def cache_info(self) -> UpdatableLRU.CacheInfo:
        """
        The statistics on the block cache.

        Returns
        -------
        NamedTuple
            Returned directly from the LRU Cache used internally.
        """
        return self._fetch_block_cached.cache_info()

    def __getstate__(self) -> dict[str, Any]:
        # BUGFIX: operate on a *copy* of __dict__.  The previous code
        # deleted the executor/future/lock attributes straight from
        # ``self.__dict__``, which stripped them from the live instance and
        # broke it after any pickling attempt; MMapCache already does this
        # correctly with ``.copy()``.
        state = self.__dict__.copy()
        del state["_fetch_block_cached"]
        del state["_thread_executor"]
        del state["_fetch_future_block_number"]
        del state["_fetch_future"]
        del state["_fetch_future_lock"]
        return state

    def __setstate__(self, state) -> None:
        # Recreate everything __getstate__ removed (none of it is picklable).
        self.__dict__.update(state)
        self._fetch_block_cached = UpdatableLRU(self._fetch_block, state["maxblocks"])
        self._thread_executor = ThreadPoolExecutor(max_workers=1)
        self._fetch_future_block_number = None
        self._fetch_future = None
        self._fetch_future_lock = threading.Lock()

    def _fetch(self, start: int | None, end: int | None) -> bytes:
        """Return bytes [start, end), joining/launching background fetches."""
        if start is None:
            start = 0
        if end is None:
            end = self.size
        if start >= self.size or start >= end:
            return b""

        # byte position -> block numbers
        start_block_number = start // self.blocksize
        end_block_number = end // self.blocksize

        fetch_future_block_number = None
        fetch_future = None
        with self._fetch_future_lock:
            # Background thread is running. Check we we can or must join it.
            if self._fetch_future is not None:
                assert self._fetch_future_block_number is not None
                if self._fetch_future.done():
                    logger.info("BlockCache joined background fetch without waiting.")
                    self._fetch_block_cached.add_key(
                        self._fetch_future.result(), self._fetch_future_block_number
                    )
                    # Cleanup the fetch variables. Done with fetching the block.
                    self._fetch_future_block_number = None
                    self._fetch_future = None
                else:
                    # Must join if we need the block for the current fetch
                    must_join = bool(
                        start_block_number
                        <= self._fetch_future_block_number
                        <= end_block_number
                    )
                    if must_join:
                        # Copy to the local variables to release lock
                        # before waiting for result
                        fetch_future_block_number = self._fetch_future_block_number
                        fetch_future = self._fetch_future

                        # Cleanup the fetch variables. Have a local copy.
                        self._fetch_future_block_number = None
                        self._fetch_future = None

        # Need to wait for the future for the current read
        if fetch_future is not None:
            logger.info("BlockCache waiting for background fetch.")
            # Wait until result and put it in cache
            self._fetch_block_cached.add_key(
                fetch_future.result(), fetch_future_block_number
            )

        # these are cached, so safe to do multiple calls for the same start and end.
        for block_number in range(start_block_number, end_block_number + 1):
            self._fetch_block_cached(block_number)

        # fetch next block in the background if nothing is running in the background,
        # the block is within file and it is not already cached
        end_block_plus_1 = end_block_number + 1
        with self._fetch_future_lock:
            if (
                self._fetch_future is None
                and end_block_plus_1 <= self.nblocks
                and not self._fetch_block_cached.is_key_cached(end_block_plus_1)
            ):
                self._fetch_future_block_number = end_block_plus_1
                self._fetch_future = self._thread_executor.submit(
                    self._fetch_block, end_block_plus_1, "async"
                )

        return self._read_cache(
            start,
            end,
            start_block_number=start_block_number,
            end_block_number=end_block_number,
        )

    def _fetch_block(self, block_number: int, log_info: str = "sync") -> bytes:
        """
        Fetch the block of data for `block_number`.
        """
        if block_number > self.nblocks:
            raise ValueError(
                f"'block_number={block_number}' is greater than "
                f"the number of blocks ({self.nblocks})"
            )

        start = block_number * self.blocksize
        end = start + self.blocksize
        logger.info("BlockCache fetching block (%s) %d", log_info, block_number)
        self.total_requested_bytes += end - start
        self.miss_count += 1
        block_contents = super()._fetch(start, end)
        return block_contents

    def _read_cache(
        self, start: int, end: int, start_block_number: int, end_block_number: int
    ) -> bytes:
        """
        Read from our block cache.

        Parameters
        ----------
        start, end : int
            The start and end byte positions.
        start_block_number, end_block_number : int
            The start and end block numbers.
        """
        start_pos = start % self.blocksize
        end_pos = end % self.blocksize

        # kind of pointless to count this as a hit, but it is
        self.hit_count += 1

        if start_block_number == end_block_number:
            block = self._fetch_block_cached(start_block_number)
            return block[start_pos:end_pos]

        else:
            # read from the initial
            out = [self._fetch_block_cached(start_block_number)[start_pos:]]

            # intermediate blocks
            # Note: it'd be nice to combine these into one big request. However
            # that doesn't play nicely with our LRU cache.
            out.extend(
                map(
                    self._fetch_block_cached,
                    range(start_block_number + 1, end_block_number),
                )
            )

            # final block
            out.append(self._fetch_block_cached(end_block_number)[:end_pos])

            return b"".join(out)
913
+
914
+
915
caches: dict[str | None, type[BaseCache]] = {
    # one custom case
    None: BaseCache,
}


def register_cache(cls: type[BaseCache], clobber: bool = False) -> None:
    """'Register' cache implementation.

    Parameters
    ----------
    cls: type
        BaseCache subclass to make available under its ``name`` attribute.
    clobber: bool, optional
        If set to True (default is False) - allow to overwrite existing
        entry.

    Raises
    ------
    ValueError
        If the name is already registered and ``clobber`` is False.
    """
    name = cls.name
    if name in caches and not clobber:
        raise ValueError(f"Cache with name {name!r} is already known: {caches[name]}")
    caches[name] = cls


# Register every implementation defined in this module.
for _cache_cls in (
    BaseCache,
    MMapCache,
    BytesCache,
    ReadAheadCache,
    BlockCache,
    FirstChunkCache,
    AllBytes,
    KnownPartsOfAFile,
    BackgroundBlockCache,
):
    register_cache(_cache_cls)
parrot/lib/python3.10/site-packages/fsspec/callbacks.py ADDED
@@ -0,0 +1,324 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import wraps
2
+
3
+
4
class Callback:
    """
    Base class and interface for callback mechanism

    This class can be used directly for monitoring file transfers by
    providing ``callback=Callback(hooks=...)`` (see the ``hooks`` argument,
    below), or subclassed for more specialised behaviour.

    Parameters
    ----------
    size: int (optional)
        Nominal quantity for the value that corresponds to a complete
        transfer, e.g., total number of tiles or total number of
        bytes
    value: int (0)
        Starting internal counter value
    hooks: dict or None
        A dict of named functions to be called on each update. The signature
        of these must be ``f(size, value, **kwargs)``
    """

    def __init__(self, size=None, value=0, hooks=None, **kwargs):
        self.size = size
        self.value = value
        self.hooks = hooks or {}
        # Extra kwargs are forwarded to every hook invocation (see call()).
        self.kw = kwargs

    def __enter__(self):
        return self

    def __exit__(self, *exc_args):
        self.close()

    def close(self):
        """Close callback."""

    def branched(self, path_1, path_2, **kwargs):
        """
        Return callback for child transfers

        If this callback is operating at a higher level, e.g., put, which may
        trigger transfers that can also be monitored. The function returns a callback
        that has to be passed to the child method, e.g., put_file,
        as `callback=` argument.

        The implementation uses `callback.branch` for compatibility.
        When implementing callbacks, it is recommended to override this function instead
        of `branch` and avoid calling `super().branched(...)`.

        Prefer using this function over `branch`.

        Parameters
        ----------
        path_1: str
            Child's source path
        path_2: str
            Child's destination path
        **kwargs:
            Arbitrary keyword arguments

        Returns
        -------
        callback: Callback
            A callback instance to be passed to the child method
        """
        self.branch(path_1, path_2, kwargs)
        # mutate kwargs so that we can force the caller to pass "callback=" explicitly
        return kwargs.pop("callback", DEFAULT_CALLBACK)

    def branch_coro(self, fn):
        """
        Wraps a coroutine, and pass a new child callback to it.
        """

        @wraps(fn)
        async def func(path1, path2: str, **kwargs):
            with self.branched(path1, path2, **kwargs) as child:
                return await fn(path1, path2, callback=child, **kwargs)

        return func

    def set_size(self, size):
        """
        Set the internal maximum size attribute

        Usually called if not initially set at instantiation. Note that this
        triggers a ``call()``.

        Parameters
        ----------
        size: int
        """
        self.size = size
        self.call()

    def absolute_update(self, value):
        """
        Set the internal value state

        Triggers ``call()``

        Parameters
        ----------
        value: int
        """
        self.value = value
        self.call()

    def relative_update(self, inc=1):
        """
        Delta increment the internal counter

        Triggers ``call()``

        Parameters
        ----------
        inc: int
        """
        self.value += inc
        self.call()

    def call(self, hook_name=None, **kwargs):
        """
        Execute hook(s) with current state

        Each function is passed the internal size and current value

        Parameters
        ----------
        hook_name: str or None
            If given, execute on this hook
        kwargs: passed on to (all) hook(s)
        """
        if not self.hooks:
            return
        # Per-call kwargs override the instance-level extras.
        kw = self.kw.copy()
        kw.update(kwargs)
        if hook_name:
            if hook_name not in self.hooks:
                return
            return self.hooks[hook_name](self.size, self.value, **kw)
        for hook in self.hooks.values() or []:
            hook(self.size, self.value, **kw)

    def wrap(self, iterable):
        """
        Wrap an iterable to call ``relative_update`` on each iterations

        Parameters
        ----------
        iterable: Iterable
            The iterable that is being wrapped
        """
        for item in iterable:
            self.relative_update()
            yield item

    def branch(self, path_1, path_2, kwargs):
        """
        Set callbacks for child transfers

        If this callback is operating at a higher level, e.g., put, which may
        trigger transfers that can also be monitored. The passed kwargs are
        to be *mutated* to add ``callback=``, if this class supports branching
        to children.

        Parameters
        ----------
        path_1: str
            Child's source path
        path_2: str
            Child's destination path
        kwargs: dict
            arguments passed to child method, e.g., put_file.

        Returns
        -------

        """
        return None

    def no_op(self, *_, **__):
        pass

    def __getattr__(self, item):
        """
        If undefined methods are called on this class, nothing happens
        """
        # NOTE: this makes *any* unknown attribute access silently return a
        # no-op callable, so typos in hook-method names will not raise.
        return self.no_op

    @classmethod
    def as_callback(cls, maybe_callback=None):
        """Transform callback=... into Callback instance

        For the special value of ``None``, return the global instance of
        ``NoOpCallback``. This is an alternative to including
        ``callback=DEFAULT_CALLBACK`` directly in a method signature.
        """
        if maybe_callback is None:
            return DEFAULT_CALLBACK
        return maybe_callback
205
+
206
+
207
class NoOpCallback(Callback):
    """
    This implementation of Callback does exactly nothing
    """

    def call(self, *args, **kwargs):
        # Deliberately ignore all state and hooks; this class backs the
        # shared default callback instance.
        return None
214
+
215
+
216
class DotPrinterCallback(Callback):
    """
    Simple example Callback implementation

    Almost identical to Callback with a hook that prints a char; here we
    demonstrate how the outer layer may print "#" and the inner layer "."
    """

    def __init__(self, chr_to_print="#", **kwargs):
        # Remember which character this layer emits before deferring to the
        # base class for the usual bookkeeping.
        self.chr = chr_to_print
        super().__init__(**kwargs)

    def branch(self, path_1, path_2, kwargs):
        """Give each child transfer its own printer, which uses a dot."""
        kwargs["callback"] = DotPrinterCallback(".")

    def call(self, **kwargs):
        """Emit this layer's character without a trailing newline."""
        print(self.chr, end="")
235
+
236
+
237
class TqdmCallback(Callback):
    """
    A callback to display a progress bar using tqdm

    Parameters
    ----------
    tqdm_kwargs : dict, (optional)
        Any argument accepted by the tqdm constructor.
        See the `tqdm doc <https://tqdm.github.io/docs/tqdm/#__init__>`_.
        Will be forwarded to `tqdm_cls`.
    tqdm_cls: (optional)
        subclass of `tqdm.tqdm`. If not passed, it will default to `tqdm.tqdm`.

    Examples
    --------
    >>> import fsspec
    >>> from fsspec.callbacks import TqdmCallback
    >>> fs = fsspec.filesystem("memory")
    >>> path2distant_data = "/your-path"
    >>> fs.upload(
            ".",
            path2distant_data,
            recursive=True,
            callback=TqdmCallback(),
        )

    You can forward args to tqdm using the ``tqdm_kwargs`` parameter.

    >>> fs.upload(
            ".",
            path2distant_data,
            recursive=True,
            callback=TqdmCallback(tqdm_kwargs={"desc": "Your tqdm description"}),
        )

    You can also customize the progress bar by passing a subclass of `tqdm`.

    .. code-block:: python

        class TqdmFormat(tqdm):
            '''Provides a `total_time` format parameter'''
            @property
            def format_dict(self):
                d = super().format_dict
                total_time = d["elapsed"] * (d["total"] or 0) / max(d["n"], 1)
                d.update(total_time=self.format_interval(total_time) + " in total")
                return d

    >>> with TqdmCallback(
            tqdm_kwargs={
                "desc": "desc",
                "bar_format": "{total_time}: {percentage:.0f}%|{bar}{r_bar}",
            },
            tqdm_cls=TqdmFormat,
        ) as callback:
            fs.upload(".", path2distant_data, recursive=True, callback=callback)
    """

    def __init__(self, tqdm_kwargs=None, *args, **kwargs):
        # tqdm is an optional dependency: import lazily so the module loads
        # without it, and fail with a clear message only on actual use.
        try:
            from tqdm import tqdm

        except ImportError as exce:
            raise ImportError(
                "Using TqdmCallback requires tqdm to be installed"
            ) from exce

        self._tqdm_cls = kwargs.pop("tqdm_cls", tqdm)
        self._tqdm_kwargs = tqdm_kwargs or {}
        # The bar itself is created lazily on the first call() so that
        # ``self.size`` has had a chance to be set.
        self.tqdm = None
        super().__init__(*args, **kwargs)

    def call(self, *args, **kwargs):
        if self.tqdm is None:
            self.tqdm = self._tqdm_cls(total=self.size, **self._tqdm_kwargs)
        # Keep the bar in sync with size updates made after creation.
        self.tqdm.total = self.size
        self.tqdm.update(self.value - self.tqdm.n)

    def close(self):
        # Idempotent: safe to call multiple times (also via __del__).
        if self.tqdm is not None:
            self.tqdm.close()
            self.tqdm = None

    def __del__(self):
        return self.close()
322
+
323
+
324
# Shared no-op singleton; ``_DEFAULT_CALLBACK`` is kept as a
# backwards-compatible alias bound to the very same instance.
DEFAULT_CALLBACK = _DEFAULT_CALLBACK = NoOpCallback()
parrot/lib/python3.10/site-packages/fsspec/compression.py ADDED
@@ -0,0 +1,175 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Helper functions for a standard streaming compression API"""
2
+
3
+ from zipfile import ZipFile
4
+
5
+ import fsspec.utils
6
+ from fsspec.spec import AbstractBufferedFile
7
+
8
+
9
+ def noop_file(file, mode, **kwargs):
10
+ return file
11
+
12
+
13
+ # TODO: files should also be available as contexts
14
+ # should be functions of the form func(infile, mode=, **kwargs) -> file-like
15
+ compr = {None: noop_file}
16
+
17
+
18
+ def register_compression(name, callback, extensions, force=False):
19
+ """Register an "inferable" file compression type.
20
+
21
+ Registers transparent file compression type for use with fsspec.open.
22
+ Compression can be specified by name in open, or "infer"-ed for any files
23
+ ending with the given extensions.
24
+
25
+ Args:
26
+ name: (str) The compression type name. Eg. "gzip".
27
+ callback: A callable of form (infile, mode, **kwargs) -> file-like.
28
+ Accepts an input file-like object, the target mode and kwargs.
29
+ Returns a wrapped file-like object.
30
+ extensions: (str, Iterable[str]) A file extension, or list of file
31
+ extensions for which to infer this compression scheme. Eg. "gz".
32
+ force: (bool) Force re-registration of compression type or extensions.
33
+
34
+ Raises:
35
+ ValueError: If name or extensions already registered, and not force.
36
+
37
+ """
38
+ if isinstance(extensions, str):
39
+ extensions = [extensions]
40
+
41
+ # Validate registration
42
+ if name in compr and not force:
43
+ raise ValueError(f"Duplicate compression registration: {name}")
44
+
45
+ for ext in extensions:
46
+ if ext in fsspec.utils.compressions and not force:
47
+ raise ValueError(f"Duplicate compression file extension: {ext} ({name})")
48
+
49
+ compr[name] = callback
50
+
51
+ for ext in extensions:
52
+ fsspec.utils.compressions[ext] = name
53
+
54
+
55
+ def unzip(infile, mode="rb", filename=None, **kwargs):
56
+ if "r" not in mode:
57
+ filename = filename or "file"
58
+ z = ZipFile(infile, mode="w", **kwargs)
59
+ fo = z.open(filename, mode="w")
60
+ fo.close = lambda closer=fo.close: closer() or z.close()
61
+ return fo
62
+ z = ZipFile(infile)
63
+ if filename is None:
64
+ filename = z.namelist()[0]
65
+ return z.open(filename, mode="r", **kwargs)
66
+
67
+
68
+ register_compression("zip", unzip, "zip")
69
+
70
+ try:
71
+ from bz2 import BZ2File
72
+ except ImportError:
73
+ pass
74
+ else:
75
+ register_compression("bz2", BZ2File, "bz2")
76
+
77
+ try: # pragma: no cover
78
+ from isal import igzip
79
+
80
+ def isal(infile, mode="rb", **kwargs):
81
+ return igzip.IGzipFile(fileobj=infile, mode=mode, **kwargs)
82
+
83
+ register_compression("gzip", isal, "gz")
84
+ except ImportError:
85
+ from gzip import GzipFile
86
+
87
+ register_compression(
88
+ "gzip", lambda f, **kwargs: GzipFile(fileobj=f, **kwargs), "gz"
89
+ )
90
+
91
+ try:
92
+ from lzma import LZMAFile
93
+
94
+ register_compression("lzma", LZMAFile, "lzma")
95
+ register_compression("xz", LZMAFile, "xz")
96
+ except ImportError:
97
+ pass
98
+
99
+ try:
100
+ import lzmaffi
101
+
102
+ register_compression("lzma", lzmaffi.LZMAFile, "lzma", force=True)
103
+ register_compression("xz", lzmaffi.LZMAFile, "xz", force=True)
104
+ except ImportError:
105
+ pass
106
+
107
+
108
+ class SnappyFile(AbstractBufferedFile):
109
+ def __init__(self, infile, mode, **kwargs):
110
+ import snappy
111
+
112
+ super().__init__(
113
+ fs=None, path="snappy", mode=mode.strip("b") + "b", size=999999999, **kwargs
114
+ )
115
+ self.infile = infile
116
+ if "r" in mode:
117
+ self.codec = snappy.StreamDecompressor()
118
+ else:
119
+ self.codec = snappy.StreamCompressor()
120
+
121
+ def _upload_chunk(self, final=False):
122
+ self.buffer.seek(0)
123
+ out = self.codec.add_chunk(self.buffer.read())
124
+ self.infile.write(out)
125
+ return True
126
+
127
+ def seek(self, loc, whence=0):
128
+ raise NotImplementedError("SnappyFile is not seekable")
129
+
130
+ def seekable(self):
131
+ return False
132
+
133
+ def _fetch_range(self, start, end):
134
+ """Get the specified set of bytes from remote"""
135
+ data = self.infile.read(end - start)
136
+ return self.codec.decompress(data)
137
+
138
+
139
+ try:
140
+ import snappy
141
+
142
+ snappy.compress(b"")
143
+ # Snappy may use the .sz file extension, but this is not part of the
144
+ # standard implementation.
145
+ register_compression("snappy", SnappyFile, [])
146
+
147
+ except (ImportError, NameError, AttributeError):
148
+ pass
149
+
150
+ try:
151
+ import lz4.frame
152
+
153
+ register_compression("lz4", lz4.frame.open, "lz4")
154
+ except ImportError:
155
+ pass
156
+
157
+ try:
158
+ import zstandard as zstd
159
+
160
+ def zstandard_file(infile, mode="rb"):
161
+ if "r" in mode:
162
+ cctx = zstd.ZstdDecompressor()
163
+ return cctx.stream_reader(infile)
164
+ else:
165
+ cctx = zstd.ZstdCompressor(level=10)
166
+ return cctx.stream_writer(infile)
167
+
168
+ register_compression("zstd", zstandard_file, "zst")
169
+ except ImportError:
170
+ pass
171
+
172
+
173
+ def available_compressions():
174
+ """Return a list of the implemented compressions."""
175
+ return list(compr)
parrot/lib/python3.10/site-packages/fsspec/config.py ADDED
@@ -0,0 +1,131 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import configparser
4
+ import json
5
+ import os
6
+ import warnings
7
+ from typing import Any
8
+
9
+ conf: dict[str, dict[str, Any]] = {}
10
+ default_conf_dir = os.path.join(os.path.expanduser("~"), ".config/fsspec")
11
+ conf_dir = os.environ.get("FSSPEC_CONFIG_DIR", default_conf_dir)
12
+
13
+
14
+ def set_conf_env(conf_dict, envdict=os.environ):
15
+ """Set config values from environment variables
16
+
17
+ Looks for variables of the form ``FSSPEC_<protocol>`` and
18
+ ``FSSPEC_<protocol>_<kwarg>``. For ``FSSPEC_<protocol>`` the value is parsed
19
+ as a json dictionary and used to ``update`` the config of the
20
+ corresponding protocol. For ``FSSPEC_<protocol>_<kwarg>`` there is no
21
+ attempt to convert the string value, but the kwarg keys will be lower-cased.
22
+
23
+ The ``FSSPEC_<protocol>_<kwarg>`` variables are applied after the
24
+ ``FSSPEC_<protocol>`` ones.
25
+
26
+ Parameters
27
+ ----------
28
+ conf_dict : dict(str, dict)
29
+ This dict will be mutated
30
+ envdict : dict-like(str, str)
31
+ Source for the values - usually the real environment
32
+ """
33
+ kwarg_keys = []
34
+ for key in envdict:
35
+ if key.startswith("FSSPEC_") and len(key) > 7 and key[7] != "_":
36
+ if key.count("_") > 1:
37
+ kwarg_keys.append(key)
38
+ continue
39
+ try:
40
+ value = json.loads(envdict[key])
41
+ except json.decoder.JSONDecodeError as ex:
42
+ warnings.warn(
43
+ f"Ignoring environment variable {key} due to a parse failure: {ex}"
44
+ )
45
+ else:
46
+ if isinstance(value, dict):
47
+ _, proto = key.split("_", 1)
48
+ conf_dict.setdefault(proto.lower(), {}).update(value)
49
+ else:
50
+ warnings.warn(
51
+ f"Ignoring environment variable {key} due to not being a dict:"
52
+ f" {type(value)}"
53
+ )
54
+ elif key.startswith("FSSPEC"):
55
+ warnings.warn(
56
+ f"Ignoring environment variable {key} due to having an unexpected name"
57
+ )
58
+
59
+ for key in kwarg_keys:
60
+ _, proto, kwarg = key.split("_", 2)
61
+ conf_dict.setdefault(proto.lower(), {})[kwarg.lower()] = envdict[key]
62
+
63
+
64
+ def set_conf_files(cdir, conf_dict):
65
+ """Set config values from files
66
+
67
+ Scans for INI and JSON files in the given dictionary, and uses their
68
+ contents to set the config. In case of repeated values, later values
69
+ win.
70
+
71
+ In the case of INI files, all values are strings, and these will not
72
+ be converted.
73
+
74
+ Parameters
75
+ ----------
76
+ cdir : str
77
+ Directory to search
78
+ conf_dict : dict(str, dict)
79
+ This dict will be mutated
80
+ """
81
+ if not os.path.isdir(cdir):
82
+ return
83
+ allfiles = sorted(os.listdir(cdir))
84
+ for fn in allfiles:
85
+ if fn.endswith(".ini"):
86
+ ini = configparser.ConfigParser()
87
+ ini.read(os.path.join(cdir, fn))
88
+ for key in ini:
89
+ if key == "DEFAULT":
90
+ continue
91
+ conf_dict.setdefault(key, {}).update(dict(ini[key]))
92
+ if fn.endswith(".json"):
93
+ with open(os.path.join(cdir, fn)) as f:
94
+ js = json.load(f)
95
+ for key in js:
96
+ conf_dict.setdefault(key, {}).update(dict(js[key]))
97
+
98
+
99
+ def apply_config(cls, kwargs, conf_dict=None):
100
+ """Supply default values for kwargs when instantiating class
101
+
102
+ Augments the passed kwargs, by finding entries in the config dict
103
+ which match the classes ``.protocol`` attribute (one or more str)
104
+
105
+ Parameters
106
+ ----------
107
+ cls : file system implementation
108
+ kwargs : dict
109
+ conf_dict : dict of dict
110
+ Typically this is the global configuration
111
+
112
+ Returns
113
+ -------
114
+ dict : the modified set of kwargs
115
+ """
116
+ if conf_dict is None:
117
+ conf_dict = conf
118
+ protos = cls.protocol if isinstance(cls.protocol, (tuple, list)) else [cls.protocol]
119
+ kw = {}
120
+ for proto in protos:
121
+ # default kwargs from the current state of the config
122
+ if proto in conf_dict:
123
+ kw.update(conf_dict[proto])
124
+ # explicit kwargs always win
125
+ kw.update(**kwargs)
126
+ kwargs = kw
127
+ return kwargs
128
+
129
+
130
+ set_conf_files(conf_dir, conf)
131
+ set_conf_env(conf)
parrot/lib/python3.10/site-packages/fsspec/conftest.py ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import shutil
3
+ import subprocess
4
+ import sys
5
+ import time
6
+
7
+ import pytest
8
+
9
+ import fsspec
10
+ from fsspec.implementations.cached import CachingFileSystem
11
+
12
+
13
+ @pytest.fixture()
14
+ def m():
15
+ """
16
+ Fixture providing a memory filesystem.
17
+ """
18
+ m = fsspec.filesystem("memory")
19
+ m.store.clear()
20
+ m.pseudo_dirs.clear()
21
+ m.pseudo_dirs.append("")
22
+ try:
23
+ yield m
24
+ finally:
25
+ m.store.clear()
26
+ m.pseudo_dirs.clear()
27
+ m.pseudo_dirs.append("")
28
+
29
+
30
+ @pytest.fixture
31
+ def ftp_writable(tmpdir):
32
+ """
33
+ Fixture providing a writable FTP filesystem.
34
+ """
35
+ pytest.importorskip("pyftpdlib")
36
+ from fsspec.implementations.ftp import FTPFileSystem
37
+
38
+ FTPFileSystem.clear_instance_cache() # remove lingering connections
39
+ CachingFileSystem.clear_instance_cache()
40
+ d = str(tmpdir)
41
+ with open(os.path.join(d, "out"), "wb") as f:
42
+ f.write(b"hello" * 10000)
43
+ P = subprocess.Popen(
44
+ [sys.executable, "-m", "pyftpdlib", "-d", d, "-u", "user", "-P", "pass", "-w"]
45
+ )
46
+ try:
47
+ time.sleep(1)
48
+ yield "localhost", 2121, "user", "pass"
49
+ finally:
50
+ P.terminate()
51
+ P.wait()
52
+ try:
53
+ shutil.rmtree(tmpdir)
54
+ except Exception:
55
+ pass
parrot/lib/python3.10/site-packages/fsspec/core.py ADDED
@@ -0,0 +1,738 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import io
4
+ import logging
5
+ import os
6
+ import re
7
+ from glob import has_magic
8
+ from pathlib import Path
9
+
10
+ # for backwards compat, we export cache things from here too
11
+ from fsspec.caching import ( # noqa: F401
12
+ BaseCache,
13
+ BlockCache,
14
+ BytesCache,
15
+ MMapCache,
16
+ ReadAheadCache,
17
+ caches,
18
+ )
19
+ from fsspec.compression import compr
20
+ from fsspec.config import conf
21
+ from fsspec.registry import filesystem, get_filesystem_class
22
+ from fsspec.utils import (
23
+ _unstrip_protocol,
24
+ build_name_function,
25
+ infer_compression,
26
+ stringify_path,
27
+ )
28
+
29
+ logger = logging.getLogger("fsspec")
30
+
31
+
32
+ class OpenFile:
33
+ """
34
+ File-like object to be used in a context
35
+
36
+ Can layer (buffered) text-mode and compression over any file-system, which
37
+ are typically binary-only.
38
+
39
+ These instances are safe to serialize, as the low-level file object
40
+ is not created until invoked using ``with``.
41
+
42
+ Parameters
43
+ ----------
44
+ fs: FileSystem
45
+ The file system to use for opening the file. Should be a subclass or duck-type
46
+ with ``fsspec.spec.AbstractFileSystem``
47
+ path: str
48
+ Location to open
49
+ mode: str like 'rb', optional
50
+ Mode of the opened file
51
+ compression: str or None, optional
52
+ Compression to apply
53
+ encoding: str or None, optional
54
+ The encoding to use if opened in text mode.
55
+ errors: str or None, optional
56
+ How to handle encoding errors if opened in text mode.
57
+ newline: None or str
58
+ Passed to TextIOWrapper in text mode, how to handle line endings.
59
+ autoopen: bool
60
+ If True, calls open() immediately. Mostly used by pickle
61
+ pos: int
62
+ If given and autoopen is True, seek to this location immediately
63
+ """
64
+
65
+ def __init__(
66
+ self,
67
+ fs,
68
+ path,
69
+ mode="rb",
70
+ compression=None,
71
+ encoding=None,
72
+ errors=None,
73
+ newline=None,
74
+ ):
75
+ self.fs = fs
76
+ self.path = path
77
+ self.mode = mode
78
+ self.compression = get_compression(path, compression)
79
+ self.encoding = encoding
80
+ self.errors = errors
81
+ self.newline = newline
82
+ self.fobjects = []
83
+
84
+ def __reduce__(self):
85
+ return (
86
+ OpenFile,
87
+ (
88
+ self.fs,
89
+ self.path,
90
+ self.mode,
91
+ self.compression,
92
+ self.encoding,
93
+ self.errors,
94
+ self.newline,
95
+ ),
96
+ )
97
+
98
+ def __repr__(self):
99
+ return f"<OpenFile '{self.path}'>"
100
+
101
+ def __enter__(self):
102
+ mode = self.mode.replace("t", "").replace("b", "") + "b"
103
+
104
+ try:
105
+ f = self.fs.open(self.path, mode=mode)
106
+ except FileNotFoundError as e:
107
+ if has_magic(self.path):
108
+ raise FileNotFoundError(
109
+ "%s not found. The URL contains glob characters: you maybe needed\n"
110
+ "to pass expand=True in fsspec.open() or the storage_options of \n"
111
+ "your library. You can also set the config value 'open_expand'\n"
112
+ "before import, or fsspec.core.DEFAULT_EXPAND at runtime, to True.",
113
+ self.path,
114
+ ) from e
115
+ raise
116
+
117
+ self.fobjects = [f]
118
+
119
+ if self.compression is not None:
120
+ compress = compr[self.compression]
121
+ f = compress(f, mode=mode[0])
122
+ self.fobjects.append(f)
123
+
124
+ if "b" not in self.mode:
125
+ # assume, for example, that 'r' is equivalent to 'rt' as in builtin
126
+ f = PickleableTextIOWrapper(
127
+ f, encoding=self.encoding, errors=self.errors, newline=self.newline
128
+ )
129
+ self.fobjects.append(f)
130
+
131
+ return self.fobjects[-1]
132
+
133
+ def __exit__(self, *args):
134
+ self.close()
135
+
136
+ @property
137
+ def full_name(self):
138
+ return _unstrip_protocol(self.path, self.fs)
139
+
140
+ def open(self):
141
+ """Materialise this as a real open file without context
142
+
143
+ The OpenFile object should be explicitly closed to avoid enclosed file
144
+ instances persisting. You must, therefore, keep a reference to the OpenFile
145
+ during the life of the file-like it generates.
146
+ """
147
+ return self.__enter__()
148
+
149
+ def close(self):
150
+ """Close all encapsulated file objects"""
151
+ for f in reversed(self.fobjects):
152
+ if "r" not in self.mode and not f.closed:
153
+ f.flush()
154
+ f.close()
155
+ self.fobjects.clear()
156
+
157
+
158
+ class OpenFiles(list):
159
+ """List of OpenFile instances
160
+
161
+ Can be used in a single context, which opens and closes all of the
162
+ contained files. Normal list access to get the elements works as
163
+ normal.
164
+
165
+ A special case is made for caching filesystems - the files will
166
+ be down/uploaded together at the start or end of the context, and
167
+ this may happen concurrently, if the target filesystem supports it.
168
+ """
169
+
170
+ def __init__(self, *args, mode="rb", fs=None):
171
+ self.mode = mode
172
+ self.fs = fs
173
+ self.files = []
174
+ super().__init__(*args)
175
+
176
+ def __enter__(self):
177
+ if self.fs is None:
178
+ raise ValueError("Context has already been used")
179
+
180
+ fs = self.fs
181
+ while True:
182
+ if hasattr(fs, "open_many"):
183
+ # check for concurrent cache download; or set up for upload
184
+ self.files = fs.open_many(self)
185
+ return self.files
186
+ if hasattr(fs, "fs") and fs.fs is not None:
187
+ fs = fs.fs
188
+ else:
189
+ break
190
+ return [s.__enter__() for s in self]
191
+
192
+ def __exit__(self, *args):
193
+ fs = self.fs
194
+ [s.__exit__(*args) for s in self]
195
+ if "r" not in self.mode:
196
+ while True:
197
+ if hasattr(fs, "open_many"):
198
+ # check for concurrent cache upload
199
+ fs.commit_many(self.files)
200
+ return
201
+ if hasattr(fs, "fs") and fs.fs is not None:
202
+ fs = fs.fs
203
+ else:
204
+ break
205
+
206
+ def __getitem__(self, item):
207
+ out = super().__getitem__(item)
208
+ if isinstance(item, slice):
209
+ return OpenFiles(out, mode=self.mode, fs=self.fs)
210
+ return out
211
+
212
+ def __repr__(self):
213
+ return f"<List of {len(self)} OpenFile instances>"
214
+
215
+
216
+ def open_files(
217
+ urlpath,
218
+ mode="rb",
219
+ compression=None,
220
+ encoding="utf8",
221
+ errors=None,
222
+ name_function=None,
223
+ num=1,
224
+ protocol=None,
225
+ newline=None,
226
+ auto_mkdir=True,
227
+ expand=True,
228
+ **kwargs,
229
+ ):
230
+ """Given a path or paths, return a list of ``OpenFile`` objects.
231
+
232
+ For writing, a str path must contain the "*" character, which will be filled
233
+ in by increasing numbers, e.g., "part*" -> "part1", "part2" if num=2.
234
+
235
+ For either reading or writing, can instead provide explicit list of paths.
236
+
237
+ Parameters
238
+ ----------
239
+ urlpath: string or list
240
+ Absolute or relative filepath(s). Prefix with a protocol like ``s3://``
241
+ to read from alternative filesystems. To read from multiple files you
242
+ can pass a globstring or a list of paths, with the caveat that they
243
+ must all have the same protocol.
244
+ mode: 'rb', 'wt', etc.
245
+ compression: string or None
246
+ If given, open file using compression codec. Can either be a compression
247
+ name (a key in ``fsspec.compression.compr``) or "infer" to guess the
248
+ compression from the filename suffix.
249
+ encoding: str
250
+ For text mode only
251
+ errors: None or str
252
+ Passed to TextIOWrapper in text mode
253
+ name_function: function or None
254
+ if opening a set of files for writing, those files do not yet exist,
255
+ so we need to generate their names by formatting the urlpath for
256
+ each sequence number
257
+ num: int [1]
258
+ if writing mode, number of files we expect to create (passed to
259
+ name+function)
260
+ protocol: str or None
261
+ If given, overrides the protocol found in the URL.
262
+ newline: bytes or None
263
+ Used for line terminator in text mode. If None, uses system default;
264
+ if blank, uses no translation.
265
+ auto_mkdir: bool (True)
266
+ If in write mode, this will ensure the target directory exists before
267
+ writing, by calling ``fs.mkdirs(exist_ok=True)``.
268
+ expand: bool
269
+ **kwargs: dict
270
+ Extra options that make sense to a particular storage connection, e.g.
271
+ host, port, username, password, etc.
272
+
273
+ Examples
274
+ --------
275
+ >>> files = open_files('2015-*-*.csv') # doctest: +SKIP
276
+ >>> files = open_files(
277
+ ... 's3://bucket/2015-*-*.csv.gz', compression='gzip'
278
+ ... ) # doctest: +SKIP
279
+
280
+ Returns
281
+ -------
282
+ An ``OpenFiles`` instance, which is a list of ``OpenFile`` objects that can
283
+ be used as a single context
284
+
285
+ Notes
286
+ -----
287
+ For a full list of the available protocols and the implementations that
288
+ they map across to see the latest online documentation:
289
+
290
+ - For implementations built into ``fsspec`` see
291
+ https://filesystem-spec.readthedocs.io/en/latest/api.html#built-in-implementations
292
+ - For implementations in separate packages see
293
+ https://filesystem-spec.readthedocs.io/en/latest/api.html#other-known-implementations
294
+ """
295
+ fs, fs_token, paths = get_fs_token_paths(
296
+ urlpath,
297
+ mode,
298
+ num=num,
299
+ name_function=name_function,
300
+ storage_options=kwargs,
301
+ protocol=protocol,
302
+ expand=expand,
303
+ )
304
+ if fs.protocol == "file":
305
+ fs.auto_mkdir = auto_mkdir
306
+ elif "r" not in mode and auto_mkdir:
307
+ parents = {fs._parent(path) for path in paths}
308
+ for parent in parents:
309
+ try:
310
+ fs.makedirs(parent, exist_ok=True)
311
+ except PermissionError:
312
+ pass
313
+ return OpenFiles(
314
+ [
315
+ OpenFile(
316
+ fs,
317
+ path,
318
+ mode=mode,
319
+ compression=compression,
320
+ encoding=encoding,
321
+ errors=errors,
322
+ newline=newline,
323
+ )
324
+ for path in paths
325
+ ],
326
+ mode=mode,
327
+ fs=fs,
328
+ )
329
+
330
+
331
+ def _un_chain(path, kwargs):
332
+ x = re.compile(".*[^a-z]+.*") # test for non protocol-like single word
333
+ bits = (
334
+ [p if "://" in p or x.match(p) else p + "://" for p in path.split("::")]
335
+ if "::" in path
336
+ else [path]
337
+ )
338
+ # [[url, protocol, kwargs], ...]
339
+ out = []
340
+ previous_bit = None
341
+ kwargs = kwargs.copy()
342
+ for bit in reversed(bits):
343
+ protocol = kwargs.pop("protocol", None) or split_protocol(bit)[0] or "file"
344
+ cls = get_filesystem_class(protocol)
345
+ extra_kwargs = cls._get_kwargs_from_urls(bit)
346
+ kws = kwargs.pop(protocol, {})
347
+ if bit is bits[0]:
348
+ kws.update(kwargs)
349
+ kw = dict(**extra_kwargs, **kws)
350
+ bit = cls._strip_protocol(bit)
351
+ if (
352
+ protocol in {"blockcache", "filecache", "simplecache"}
353
+ and "target_protocol" not in kw
354
+ ):
355
+ bit = previous_bit
356
+ out.append((bit, protocol, kw))
357
+ previous_bit = bit
358
+ out.reverse()
359
+ return out
360
+
361
+
362
+ def url_to_fs(url, **kwargs):
363
+ """
364
+ Turn fully-qualified and potentially chained URL into filesystem instance
365
+
366
+ Parameters
367
+ ----------
368
+ url : str
369
+ The fsspec-compatible URL
370
+ **kwargs: dict
371
+ Extra options that make sense to a particular storage connection, e.g.
372
+ host, port, username, password, etc.
373
+
374
+ Returns
375
+ -------
376
+ filesystem : FileSystem
377
+ The new filesystem discovered from ``url`` and created with
378
+ ``**kwargs``.
379
+ urlpath : str
380
+ The file-systems-specific URL for ``url``.
381
+ """
382
+ url = stringify_path(url)
383
+ # non-FS arguments that appear in fsspec.open()
384
+ # inspect could keep this in sync with open()'s signature
385
+ known_kwargs = {
386
+ "compression",
387
+ "encoding",
388
+ "errors",
389
+ "expand",
390
+ "mode",
391
+ "name_function",
392
+ "newline",
393
+ "num",
394
+ }
395
+ kwargs = {k: v for k, v in kwargs.items() if k not in known_kwargs}
396
+ chain = _un_chain(url, kwargs)
397
+ inkwargs = {}
398
+ # Reverse iterate the chain, creating a nested target_* structure
399
+ for i, ch in enumerate(reversed(chain)):
400
+ urls, protocol, kw = ch
401
+ if i == len(chain) - 1:
402
+ inkwargs = dict(**kw, **inkwargs)
403
+ continue
404
+ inkwargs["target_options"] = dict(**kw, **inkwargs)
405
+ inkwargs["target_protocol"] = protocol
406
+ inkwargs["fo"] = urls
407
+ urlpath, protocol, _ = chain[0]
408
+ fs = filesystem(protocol, **inkwargs)
409
+ return fs, urlpath
410
+
411
+
412
+ DEFAULT_EXPAND = conf.get("open_expand", False)
413
+
414
+
415
+ def open(
416
+ urlpath,
417
+ mode="rb",
418
+ compression=None,
419
+ encoding="utf8",
420
+ errors=None,
421
+ protocol=None,
422
+ newline=None,
423
+ expand=None,
424
+ **kwargs,
425
+ ):
426
+ """Given a path or paths, return one ``OpenFile`` object.
427
+
428
+ Parameters
429
+ ----------
430
+ urlpath: string or list
431
+ Absolute or relative filepath. Prefix with a protocol like ``s3://``
432
+ to read from alternative filesystems. Should not include glob
433
+ character(s).
434
+ mode: 'rb', 'wt', etc.
435
+ compression: string or None
436
+ If given, open file using compression codec. Can either be a compression
437
+ name (a key in ``fsspec.compression.compr``) or "infer" to guess the
438
+ compression from the filename suffix.
439
+ encoding: str
440
+ For text mode only
441
+ errors: None or str
442
+ Passed to TextIOWrapper in text mode
443
+ protocol: str or None
444
+ If given, overrides the protocol found in the URL.
445
+ newline: bytes or None
446
+ Used for line terminator in text mode. If None, uses system default;
447
+ if blank, uses no translation.
448
+ expand: bool or Nonw
449
+ Whether to regard file paths containing special glob characters as needing
450
+ expansion (finding the first match) or absolute. Setting False allows using
451
+ paths which do embed such characters. If None (default), this argument
452
+ takes its value from the DEFAULT_EXPAND module variable, which takes
453
+ its initial value from the "open_expand" config value at startup, which will
454
+ be False if not set.
455
+ **kwargs: dict
456
+ Extra options that make sense to a particular storage connection, e.g.
457
+ host, port, username, password, etc.
458
+
459
+ Examples
460
+ --------
461
+ >>> openfile = open('2015-01-01.csv') # doctest: +SKIP
462
+ >>> openfile = open(
463
+ ... 's3://bucket/2015-01-01.csv.gz', compression='gzip'
464
+ ... ) # doctest: +SKIP
465
+ >>> with openfile as f:
466
+ ... df = pd.read_csv(f) # doctest: +SKIP
467
+ ...
468
+
469
+ Returns
470
+ -------
471
+ ``OpenFile`` object.
472
+
473
+ Notes
474
+ -----
475
+ For a full list of the available protocols and the implementations that
476
+ they map across to see the latest online documentation:
477
+
478
+ - For implementations built into ``fsspec`` see
479
+ https://filesystem-spec.readthedocs.io/en/latest/api.html#built-in-implementations
480
+ - For implementations in separate packages see
481
+ https://filesystem-spec.readthedocs.io/en/latest/api.html#other-known-implementations
482
+ """
483
+ expand = DEFAULT_EXPAND if expand is None else expand
484
+ out = open_files(
485
+ urlpath=[urlpath],
486
+ mode=mode,
487
+ compression=compression,
488
+ encoding=encoding,
489
+ errors=errors,
490
+ protocol=protocol,
491
+ newline=newline,
492
+ expand=expand,
493
+ **kwargs,
494
+ )
495
+ if not out:
496
+ raise FileNotFoundError(urlpath)
497
+ return out[0]
498
+
499
+
500
+ def open_local(
501
+ url: str | list[str] | Path | list[Path],
502
+ mode: str = "rb",
503
+ **storage_options: dict,
504
+ ) -> str | list[str]:
505
+ """Open file(s) which can be resolved to local
506
+
507
+ For files which either are local, or get downloaded upon open
508
+ (e.g., by file caching)
509
+
510
+ Parameters
511
+ ----------
512
+ url: str or list(str)
513
+ mode: str
514
+ Must be read mode
515
+ storage_options:
516
+ passed on to FS for or used by open_files (e.g., compression)
517
+ """
518
+ if "r" not in mode:
519
+ raise ValueError("Can only ensure local files when reading")
520
+ of = open_files(url, mode=mode, **storage_options)
521
+ if not getattr(of[0].fs, "local_file", False):
522
+ raise ValueError(
523
+ "open_local can only be used on a filesystem which"
524
+ " has attribute local_file=True"
525
+ )
526
+ with of as files:
527
+ paths = [f.name for f in files]
528
+ if (isinstance(url, str) and not has_magic(url)) or isinstance(url, Path):
529
+ return paths[0]
530
+ return paths
531
+
532
+
533
+ def get_compression(urlpath, compression):
534
+ if compression == "infer":
535
+ compression = infer_compression(urlpath)
536
+ if compression is not None and compression not in compr:
537
+ raise ValueError(f"Compression type {compression} not supported")
538
+ return compression
539
+
540
+
541
+ def split_protocol(urlpath):
542
+ """Return protocol, path pair"""
543
+ urlpath = stringify_path(urlpath)
544
+ if "://" in urlpath:
545
+ protocol, path = urlpath.split("://", 1)
546
+ if len(protocol) > 1:
547
+ # excludes Windows paths
548
+ return protocol, path
549
+ if urlpath.startswith("data:"):
550
+ return urlpath.split(":", 1)
551
+ return None, urlpath
552
+
553
+
554
+ def strip_protocol(urlpath):
555
+ """Return only path part of full URL, according to appropriate backend"""
556
+ protocol, _ = split_protocol(urlpath)
557
+ cls = get_filesystem_class(protocol)
558
+ return cls._strip_protocol(urlpath)
559
+
560
+
561
+ def expand_paths_if_needed(paths, mode, num, fs, name_function):
562
+ """Expand paths if they have a ``*`` in them (write mode) or any of ``*?[]``
563
+ in them (read mode).
564
+
565
+ :param paths: list of paths
566
+ mode: str
567
+ Mode in which to open files.
568
+ num: int
569
+ If opening in writing mode, number of files we expect to create.
570
+ fs: filesystem object
571
+ name_function: callable
572
+ If opening in writing mode, this callable is used to generate path
573
+ names. Names are generated for each partition by
574
+ ``urlpath.replace('*', name_function(partition_index))``.
575
+ :return: list of paths
576
+ """
577
+ expanded_paths = []
578
+ paths = list(paths)
579
+
580
+ if "w" in mode: # read mode
581
+ if sum([1 for p in paths if "*" in p]) > 1:
582
+ raise ValueError(
583
+ "When writing data, only one filename mask can be specified."
584
+ )
585
+ num = max(num, len(paths))
586
+
587
+ for curr_path in paths:
588
+ if "*" in curr_path:
589
+ # expand using name_function
590
+ expanded_paths.extend(_expand_paths(curr_path, name_function, num))
591
+ else:
592
+ expanded_paths.append(curr_path)
593
+ # if we generated more paths that asked for, trim the list
594
+ if len(expanded_paths) > num:
595
+ expanded_paths = expanded_paths[:num]
596
+
597
+ else: # read mode
598
+ for curr_path in paths:
599
+ if has_magic(curr_path):
600
+ # expand using glob
601
+ expanded_paths.extend(fs.glob(curr_path))
602
+ else:
603
+ expanded_paths.append(curr_path)
604
+
605
+ return expanded_paths
606
+
607
+
608
def get_fs_token_paths(
    urlpath,
    mode="rb",
    num=1,
    name_function=None,
    storage_options=None,
    protocol=None,
    expand=True,
):
    """Filesystem, deterministic token, and paths from a urlpath and options.

    Parameters
    ----------
    urlpath: string or iterable
        Absolute or relative filepath, URL (may include protocols like
        ``s3://``), or globstring pointing to data.
    mode: str, optional
        Mode in which to open files.
    num: int, optional
        If opening in writing mode, number of files we expect to create.
    name_function: callable, optional
        If opening in writing mode, this callable is used to generate path
        names. Names are generated for each partition by
        ``urlpath.replace('*', name_function(partition_index))``.
    storage_options: dict, optional
        Additional keywords to pass to the filesystem class.
    protocol: str or None
        To override the protocol specifier in the URL
    expand: bool
        Expand string paths for writing, assuming the path is a directory
    """
    if isinstance(urlpath, (list, tuple, set)):
        if not urlpath:
            raise ValueError("empty urlpath sequence")
        urlpath0 = stringify_path(list(urlpath)[0])
    else:
        urlpath0 = stringify_path(urlpath)
    storage_options = storage_options or {}
    if protocol:
        storage_options["protocol"] = protocol
    chain = _un_chain(urlpath0, storage_options or {})
    inkwargs = {}
    # Reverse iterate the chain, creating a nested target_* structure
    for i, ch in enumerate(reversed(chain)):
        urls, nested_protocol, kw = ch
        if i == len(chain) - 1:
            inkwargs = dict(**kw, **inkwargs)
            continue
        inkwargs["target_options"] = dict(**kw, **inkwargs)
        inkwargs["target_protocol"] = nested_protocol
        inkwargs["fo"] = urls
    paths, protocol, _ = chain[0]
    fs = filesystem(protocol, **inkwargs)
    if isinstance(urlpath, (list, tuple, set)):
        pchains = [
            _un_chain(stringify_path(u), storage_options or {})[0] for u in urlpath
        ]
        if len({pc[1] for pc in pchains}) > 1:
            # bug fix: the message used to be passed as
            # ``ValueError("... %s", urlpath)`` which never interpolated
            raise ValueError(f"Protocol mismatch getting fs from {urlpath}")
        paths = [pc[0] for pc in pchains]
    else:
        paths = fs._strip_protocol(paths)
    if isinstance(paths, (list, tuple, set)):
        if expand:
            paths = expand_paths_if_needed(paths, mode, num, fs, name_function)
        elif not isinstance(paths, list):
            paths = list(paths)
    else:
        # single string path: expand masks for write/create modes, glob for read
        if ("w" in mode or "x" in mode) and expand:
            paths = _expand_paths(paths, name_function, num)
        elif "*" in paths:
            paths = [f for f in sorted(fs.glob(paths)) if not fs.isdir(f)]
        else:
            paths = [paths]

    return fs, fs._fs_token, paths
686
+
687
+
688
+ def _expand_paths(path, name_function, num):
689
+ if isinstance(path, str):
690
+ if path.count("*") > 1:
691
+ raise ValueError("Output path spec must contain exactly one '*'.")
692
+ elif "*" not in path:
693
+ path = os.path.join(path, "*.part")
694
+
695
+ if name_function is None:
696
+ name_function = build_name_function(num - 1)
697
+
698
+ paths = [path.replace("*", name_function(i)) for i in range(num)]
699
+ if paths != sorted(paths):
700
+ logger.warning(
701
+ "In order to preserve order between partitions"
702
+ " paths created with ``name_function`` should "
703
+ "sort to partition order"
704
+ )
705
+ elif isinstance(path, (tuple, list)):
706
+ assert len(path) == num
707
+ paths = list(path)
708
+ else:
709
+ raise ValueError(
710
+ "Path should be either\n"
711
+ "1. A list of paths: ['foo.json', 'bar.json', ...]\n"
712
+ "2. A directory: 'foo/\n"
713
+ "3. A path with a '*' in it: 'foo.*.json'"
714
+ )
715
+ return paths
716
+
717
+
718
class PickleableTextIOWrapper(io.TextIOWrapper):
    """A picklable variant of ``io.TextIOWrapper``.

    Plain ``TextIOWrapper`` instances cannot be pickled; this subclass
    remembers its constructor arguments and rebuilds itself on unpickling.
    It requires that ``buffer`` itself be picklable, which all instances of
    AbstractBufferedFile are.
    """

    def __init__(
        self,
        buffer,
        encoding=None,
        errors=None,
        newline=None,
        line_buffering=False,
        write_through=False,
    ):
        # remember exactly what we were constructed with, for __reduce__
        self.args = (buffer, encoding, errors, newline, line_buffering, write_through)
        super().__init__(*self.args)

    def __reduce__(self):
        # re-create by calling the class with the saved constructor arguments
        return PickleableTextIOWrapper, self.args
parrot/lib/python3.10/site-packages/fsspec/dircache.py ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import time
2
+ from collections.abc import MutableMapping
3
+ from functools import lru_cache
4
+
5
+
6
class DirCache(MutableMapping):
    """
    Caching of directory listings, in a structure like::

        {"path0": [
            {"name": "path0/file0",
             "size": 123,
             "type": "file",
             ...
            },
            {"name": "path0/file1",
            },
            ...
            ],
         "path1": [...]
        }

    Parameters to this class control listing expiry or indeed turn
    caching off
    """

    def __init__(
        self,
        use_listings_cache=True,
        listings_expiry_time=None,
        max_paths=None,
        **kwargs,
    ):
        """

        Parameters
        ----------
        use_listings_cache: bool
            If False, this cache never returns items, but always reports KeyError,
            and setting items has no effect
        listings_expiry_time: int or float (optional)
            Time in seconds that a listing is considered valid. If None,
            listings do not expire.
        max_paths: int (optional)
            The number of most recent listings that are considered valid; 'recent'
            refers to when the entry was set.
        """
        # path -> listing, and path -> timestamp of when it was set
        self._cache = {}
        self._times = {}
        if max_paths:
            # LRU eviction trick: calling ``self._q(key)`` marks ``key`` as
            # recently used; once more than ``max_paths`` distinct keys are
            # live, lru_cache evicts the least-recent one by invoking the
            # wrapped lambda, which pops that key from the real cache.
            self._q = lru_cache(max_paths + 1)(lambda key: self._cache.pop(key, None))
        self.use_listings_cache = use_listings_cache
        self.listings_expiry_time = listings_expiry_time
        self.max_paths = max_paths

    def __getitem__(self, item):
        if self.listings_expiry_time is not None:
            if self._times.get(item, 0) - time.time() < -self.listings_expiry_time:
                # entry expired: drop it so the lookup below raises KeyError
                # NOTE(review): the stale timestamp stays behind in self._times
                del self._cache[item]
        if self.max_paths:
            # refresh LRU recency for this key
            self._q(item)
        return self._cache[item]  # maybe raises KeyError

    def clear(self):
        # NOTE(review): timestamps in self._times are not cleared here
        self._cache.clear()

    def __len__(self):
        return len(self._cache)

    def __contains__(self, item):
        # delegate to __getitem__ so expiry/LRU bookkeeping applies
        try:
            self[item]
            return True
        except KeyError:
            return False

    def __setitem__(self, key, value):
        if not self.use_listings_cache:
            # caching disabled: silently drop the entry
            return
        if self.max_paths:
            self._q(key)
        self._cache[key] = value
        if self.listings_expiry_time is not None:
            self._times[key] = time.time()

    def __delitem__(self, key):
        del self._cache[key]

    def __iter__(self):
        entries = list(self._cache)

        # filter through __contains__, so expired entries are skipped
        # (and evicted) while iterating
        return (k for k in entries if k in self)

    def __reduce__(self):
        # pickle only the configuration; cached listings are not preserved
        return (
            DirCache,
            (self.use_listings_cache, self.listings_expiry_time, self.max_paths),
        )
parrot/lib/python3.10/site-packages/fsspec/exceptions.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ fsspec user-defined exception classes
3
+ """
4
+
5
+ import asyncio
6
+
7
+
8
class BlocksizeMismatchError(ValueError):
    """
    Raised when a cached file is opened with a different blocksize than the
    one it was written with
    """
13
+
14
+
15
class FSTimeoutError(asyncio.TimeoutError):
    """
    Raised when an fsspec operation times out; subclasses
    ``asyncio.TimeoutError`` so existing handlers still catch it
    """
parrot/lib/python3.10/site-packages/fsspec/fuse.py ADDED
@@ -0,0 +1,324 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import argparse
2
+ import logging
3
+ import os
4
+ import stat
5
+ import threading
6
+ import time
7
+ from errno import EIO, ENOENT
8
+
9
+ from fuse import FUSE, FuseOSError, LoggingMixIn, Operations
10
+
11
+ from fsspec import __version__
12
+ from fsspec.core import url_to_fs
13
+
14
# module-level logger; enabled with basicConfig by ``main`` when -l is given
logger = logging.getLogger("fsspec.fuse")
15
+
16
+
17
class FUSEr(Operations):
    """fusepy ``Operations`` implementation backed by an fsspec filesystem.

    Translates FUSE callbacks (getattr, read, write, ...) into calls on
    ``self.fs``, with all incoming paths joined onto the remote ``root``.
    """

    def __init__(self, fs, path, ready_file=False):
        self.fs = fs  # backing fsspec filesystem instance
        self.cache = {}  # open file handles: int fh -> file-like object
        self.root = path.rstrip("/") + "/"  # remote prefix for all paths
        self.counter = 0  # next file-handle number to hand out
        logger.info("Starting FUSE at %s", path)
        # if True, expose a synthetic "/.fuse_ready" status file
        self._ready_file = ready_file

    def getattr(self, path, fh=None):
        logger.debug("getattr %s", path)
        if self._ready_file and path in ["/.fuse_ready", ".fuse_ready"]:
            # synthetic readiness marker, not present on the backing FS
            return {"type": "file", "st_size": 5}

        path = "".join([self.root, path.lstrip("/")]).rstrip("/")
        try:
            info = self.fs.info(path)
        except FileNotFoundError:
            raise FuseOSError(ENOENT)

        # default uid/gid when the backend does not report ownership
        data = {"st_uid": info.get("uid", 1000), "st_gid": info.get("gid", 1000)}
        perm = info.get("mode", 0o777)

        if info["type"] != "file":
            data["st_mode"] = stat.S_IFDIR | perm
            data["st_size"] = 0
            data["st_blksize"] = 0
        else:
            data["st_mode"] = stat.S_IFREG | perm
            data["st_size"] = info["size"]
            data["st_blksize"] = 5 * 2**20  # 5MB blocks reported to the kernel
        data["st_nlink"] = 1
        # fall back to "now" for timestamps the backend does not supply
        data["st_atime"] = info["atime"] if "atime" in info else time.time()
        data["st_ctime"] = info["ctime"] if "ctime" in info else time.time()
        data["st_mtime"] = info["mtime"] if "mtime" in info else time.time()
        return data

    def readdir(self, path, fh):
        logger.debug("readdir %s", path)
        path = "".join([self.root, path.lstrip("/")])
        files = self.fs.ls(path, False)
        # FUSE expects bare entry names, not full paths
        files = [os.path.basename(f.rstrip("/")) for f in files]
        return [".", ".."] + files

    def mkdir(self, path, mode):
        path = "".join([self.root, path.lstrip("/")])
        self.fs.mkdir(path)
        return 0

    def rmdir(self, path):
        path = "".join([self.root, path.lstrip("/")])
        self.fs.rmdir(path)
        return 0

    def read(self, path, size, offset, fh):
        logger.debug("read %s", (path, size, offset))
        if self._ready_file and path in ["/.fuse_ready", ".fuse_ready"]:
            # status indicator
            return b"ready"

        # fh was handed out by open()/create(); seek-and-read on that handle
        f = self.cache[fh]
        f.seek(offset)
        out = f.read(size)
        return out

    def write(self, path, data, offset, fh):
        logger.debug("write %s", (path, offset))
        f = self.cache[fh]
        f.seek(offset)
        f.write(data)
        return len(data)

    def create(self, path, flags, fi=None):
        logger.debug("create %s", (path, flags))
        fn = "".join([self.root, path.lstrip("/")])
        self.fs.touch(fn)  # OS will want to get attributes immediately
        f = self.fs.open(fn, "wb")
        self.cache[self.counter] = f
        self.counter += 1
        return self.counter - 1

    def open(self, path, flags):
        logger.debug("open %s", (path, flags))
        fn = "".join([self.root, path.lstrip("/")])
        # lowest flag bit distinguishes O_RDONLY (0) from write modes
        if flags % 2 == 0:
            # read
            mode = "rb"
        else:
            # write/create
            mode = "wb"
        self.cache[self.counter] = self.fs.open(fn, mode)
        self.counter += 1
        return self.counter - 1

    def truncate(self, path, length, fh=None):
        fn = "".join([self.root, path.lstrip("/")])
        # only truncation to zero is supported
        if length != 0:
            raise NotImplementedError
        # maybe should be no-op since open with write sets size to zero anyway
        self.fs.touch(fn)

    def unlink(self, path):
        fn = "".join([self.root, path.lstrip("/")])
        try:
            # NOTE(review): FileNotFoundError is already a subclass of OSError
            self.fs.rm(fn, False)
        except (OSError, FileNotFoundError):
            raise FuseOSError(EIO)

    def release(self, path, fh):
        # close and forget the handle; errors are printed but never propagated
        try:
            if fh in self.cache:
                f = self.cache[fh]
                f.close()
                self.cache.pop(fh)
        except Exception as e:
            print(e)
        return 0

    def chmod(self, path, mode):
        # only supported when the backend itself implements chmod
        if hasattr(self.fs, "chmod"):
            path = "".join([self.root, path.lstrip("/")])
            return self.fs.chmod(path, mode)
        raise NotImplementedError
140
+
141
+
142
def run(
    fs,
    path,
    mount_point,
    foreground=True,
    threads=False,
    ready_file=False,
    ops_class=FUSEr,
):
    """Mount stuff in a local directory

    This uses fusepy to make it appear as if a given path on an fsspec
    instance is in fact resident within the local file-system.

    This requires that fusepy by installed, and that FUSE be available on
    the system (typically requiring a package to be installed with
    apt, yum, brew, etc.).

    Parameters
    ----------
    fs: file-system instance
        From one of the compatible implementations
    path: str
        Location on that file-system to regard as the root directory to
        mount. Note that you typically should include the terminating "/"
        character.
    mount_point: str
        An empty directory on the local file-system where the contents of
        the remote path will appear.
    foreground: bool
        Whether or not calling this function will block. Operation will
        typically be more stable if True.
    threads: bool
        Whether or not to create threads when responding to file operations
        within the mounter directory. Operation will typically be more
        stable if False.
    ready_file: bool
        Whether the FUSE process is ready. The ``.fuse_ready`` file will
        exist in the ``mount_point`` directory if True. Debugging purpose.
    ops_class: FUSEr or Subclass of FUSEr
        To override the default behavior of FUSEr. For Example, logging
        to file.

    Returns
    -------
    The started ``threading.Thread`` when ``foreground=False``, else None
    after the (blocking) mount terminates.
    """

    # was a lambda assigned to a name (PEP 8 E731); a def is debuggable
    def start_fuse():
        # blocks until the filesystem is unmounted (when foreground=True)
        FUSE(
            ops_class(fs, path, ready_file=ready_file),
            mount_point,
            nothreads=not threads,
            foreground=foreground,
        )

    if not foreground:
        th = threading.Thread(target=start_fuse)
        th.daemon = True
        th.start()
        return th
    else:  # pragma: no cover
        try:
            start_fuse()
        except KeyboardInterrupt:
            pass
202
+
203
+
204
def main(args):
    """Mount filesystem from chained URL to MOUNT_POINT.

    Examples:

    python3 -m fsspec.fuse memory /usr/share /tmp/mem

    python3 -m fsspec.fuse local /tmp/source /tmp/local \\
            -l /tmp/fsspecfuse.log

    You can also mount chained-URLs and use special settings:

    python3 -m fsspec.fuse 'filecache::zip::file://data.zip' \\
            / /tmp/zip \\
            -o 'filecache-cache_storage=/tmp/simplecache'

    You can specify the type of the setting by using `[int]` or `[bool]`,
    (`true`, `yes`, `1` represents the Boolean value `True`):

    python3 -m fsspec.fuse 'simplecache::ftp://ftp1.at.proftpd.org' \\
            /historic/packages/RPMS /tmp/ftp \\
            -o 'simplecache-cache_storage=/tmp/simplecache' \\
            -o 'simplecache-check_files=false[bool]' \\
            -o 'ftp-listings_expiry_time=60[int]' \\
            -o 'ftp-username=anonymous' \\
            -o 'ftp-password=xieyanbo'
    """

    class RawDescriptionArgumentParser(argparse.ArgumentParser):
        # show the docstring above verbatim instead of argparse's re-wrapping
        def format_help(self):
            usage = super().format_help()
            parts = usage.split("\n\n")
            parts[1] = self.description.rstrip()
            return "\n\n".join(parts)

    parser = RawDescriptionArgumentParser(prog="fsspec.fuse", description=main.__doc__)
    parser.add_argument("--version", action="version", version=__version__)
    parser.add_argument("url", type=str, help="fs url")
    parser.add_argument("source_path", type=str, help="source directory in fs")
    parser.add_argument("mount_point", type=str, help="local directory")
    parser.add_argument(
        "-o",
        "--option",
        action="append",
        help="Any options of protocol included in the chained URL",
    )
    parser.add_argument(
        "-l", "--log-file", type=str, help="Logging FUSE debug info (Default: '')"
    )
    # NOTE(review): store_false means the default is True and passing the flag
    # turns it OFF, which contradicts the "(Default: False)" help text on the
    # three flags below — confirm the intended CLI semantics
    parser.add_argument(
        "-f",
        "--foreground",
        action="store_false",
        help="Running in foreground or not (Default: False)",
    )
    parser.add_argument(
        "-t",
        "--threads",
        action="store_false",
        help="Running with threads support (Default: False)",
    )
    parser.add_argument(
        "-r",
        "--ready-file",
        action="store_false",
        help="The `.fuse_ready` file will exist after FUSE is ready. "
        "(Debugging purpose, Default: False)",
    )
    args = parser.parse_args(args)

    # parse -o options of the form "<proto>-<setting>=<value>[type]" into
    # nested storage-option dicts keyed by protocol
    kwargs = {}
    for item in args.option or []:
        key, sep, value = item.partition("=")
        if not sep:
            parser.error(message=f"Wrong option: {item!r}")
        val = value.lower()
        if val.endswith("[int]"):
            value = int(value[: -len("[int]")])
        elif val.endswith("[bool]"):
            value = val[: -len("[bool]")] in ["1", "yes", "true"]

        if "-" in key:
            fs_name, setting_name = key.split("-", 1)
            if fs_name in kwargs:
                kwargs[fs_name][setting_name] = value
            else:
                kwargs[fs_name] = {setting_name: value}
        else:
            kwargs[key] = value

    if args.log_file:
        logging.basicConfig(
            level=logging.DEBUG,
            filename=args.log_file,
            format="%(asctime)s %(message)s",
        )

        # mix in fusepy's LoggingMixIn so every FUSE call is logged
        class LoggingFUSEr(FUSEr, LoggingMixIn):
            pass

        fuser = LoggingFUSEr
    else:
        fuser = FUSEr

    fs, url_path = url_to_fs(args.url, **kwargs)
    logger.debug("Mounting %s to %s", url_path, str(args.mount_point))
    run(
        fs,
        args.source_path,
        args.mount_point,
        foreground=args.foreground,
        threads=args.threads,
        ready_file=args.ready_file,
        ops_class=fuser,
    )
319
+
320
+
321
if __name__ == "__main__":
    import sys

    # CLI entry point: mount a (possibly chained) fsspec URL via FUSE
    main(sys.argv[1:])
parrot/lib/python3.10/site-packages/fsspec/generic.py ADDED
@@ -0,0 +1,411 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import inspect
4
+ import logging
5
+ import os
6
+ import shutil
7
+ import uuid
8
+ from typing import Optional
9
+
10
+ from .asyn import AsyncFileSystem, _run_coros_in_chunks, sync_wrapper
11
+ from .callbacks import DEFAULT_CALLBACK
12
+ from .core import filesystem, get_filesystem_class, split_protocol, url_to_fs
13
+
14
# registry of pre-configured filesystem instances, used by method="generic";
# populated via set_generic_fs()
_generic_fs = {}
logger = logging.getLogger("fsspec.generic")
16
+
17
+
18
def set_generic_fs(protocol, **storage_options):
    """Instantiate and register a filesystem for *protocol*.

    The registered instance is the one returned by ``_resolve_fs`` when
    ``method="generic"`` is in effect.
    """
    _generic_fs[protocol] = filesystem(protocol, **storage_options)
20
+
21
+
22
# FS-resolution strategy used by _resolve_fs when none is passed explicitly
default_method = "default"
23
+
24
+
25
def _resolve_fs(url, method=None, protocol=None, storage_options=None):
    """Pick instance of backend FS"""
    chosen = method or default_method
    proto = protocol or split_protocol(url)[0]
    opts = storage_options or {}
    if chosen == "default":
        # default, config-driven instance of the protocol's class
        return filesystem(proto)
    elif chosen == "generic":
        # instance previously registered via set_generic_fs()
        return _generic_fs[proto]
    elif chosen == "current":
        # most recently instantiated filesystem of this class
        return get_filesystem_class(proto).current()
    elif chosen == "options":
        # build from per-protocol storage options
        return url_to_fs(url, **opts.get(proto, {}))[0]
    else:
        raise ValueError(f"Unknown FS resolution method: {chosen}")
41
+
42
+
43
def rsync(
    source,
    destination,
    delete_missing=False,
    source_field="size",
    dest_field="size",
    update_cond="different",
    inst_kwargs=None,
    fs=None,
    **kwargs,
):
    """Sync files between two directory trees

    (experimental)

    Parameters
    ----------
    source: str
        Root of the directory tree to take files from. This must be a directory, but
        do not include any terminating "/" character
    destination: str
        Root path to copy into. The contents of this location should be
        identical to the contents of ``source`` when done. This will be made a
        directory, and the terminal "/" should not be included.
    delete_missing: bool
        If there are paths in the destination that don't exist in the
        source and this is True, delete them. Otherwise, leave them alone.
    source_field: str | callable
        If ``update_field`` is "different", this is the key in the info
        of source files to consider for difference. Maybe a function of the
        info dict.
    dest_field: str | callable
        If ``update_field`` is "different", this is the key in the info
        of destination files to consider for difference. May be a function of
        the info dict.
    update_cond: "different"|"always"|"never"
        If "always", every file is copied, regardless of whether it exists in
        the destination. If "never", files that exist in the destination are
        not copied again. If "different" (default), only copy if the info
        fields given by ``source_field`` and ``dest_field`` (usually "size")
        are different. Other comparisons may be added in the future.
    inst_kwargs: dict|None
        If ``fs`` is None, use this set of keyword arguments to make a
        GenericFileSystem instance
    fs: GenericFileSystem|None
        Instance to use if explicitly given. The instance defines how to
        to make downstream file system instances from paths.

    Returns
    -------
    dict of the copy operations that were performed, {source: destination}
    """
    fs = fs or GenericFileSystem(**(inst_kwargs or {}))
    source = fs._strip_protocol(source)
    destination = fs._strip_protocol(destination)
    allfiles = fs.find(source, withdirs=True, detail=True)
    if not fs.isdir(source):
        raise ValueError("Can only rsync on a directory")
    otherfiles = fs.find(destination, withdirs=True, detail=True)
    # phase 1: create directories present in source but absent in destination
    dirs = [
        a
        for a, v in allfiles.items()
        if v["type"] == "directory" and a.replace(source, destination) not in otherfiles
    ]
    logger.debug(f"{len(dirs)} directories to create")
    if dirs:
        fs.make_many_dirs(
            [dirn.replace(source, destination) for dirn in dirs], exist_ok=True
        )
    # from here on, only regular files are considered
    allfiles = {a: v for a, v in allfiles.items() if v["type"] == "file"}
    logger.debug(f"{len(allfiles)} files to consider for copy")
    to_delete = [
        o
        for o, v in otherfiles.items()
        if o.replace(destination, source) not in allfiles and v["type"] == "file"
    ]
    # phase 2: decide per file whether to copy; after this loop ``allfiles``
    # maps source path -> destination path for exactly the files to copy
    for k, v in allfiles.copy().items():
        otherfile = k.replace(source, destination)
        if otherfile in otherfiles:
            if update_cond == "always":
                allfiles[k] = otherfile
            elif update_cond == "different":
                inf1 = source_field(v) if callable(source_field) else v[source_field]
                v2 = otherfiles[otherfile]
                inf2 = dest_field(v2) if callable(dest_field) else v2[dest_field]
                if inf1 != inf2:
                    # details mismatch, make copy
                    allfiles[k] = otherfile
                else:
                    # details match, don't copy
                    allfiles.pop(k)
        else:
            # file not in target yet
            allfiles[k] = otherfile
    logger.debug(f"{len(allfiles)} files to copy")
    if allfiles:
        source_files, target_files = zip(*allfiles.items())
        fs.cp(source_files, target_files, **kwargs)
    # phase 3: optionally delete destination files with no source counterpart
    logger.debug(f"{len(to_delete)} files to delete")
    if delete_missing and to_delete:
        fs.rm(to_delete)
    return allfiles
145
+
146
+
147
class GenericFileSystem(AsyncFileSystem):
    """Wrapper over all other FS types

    <experimental!>

    This implementation is a single unified interface to be able to run FS operations
    over generic URLs, and dispatch to the specific implementations using the URL
    protocol prefix.

    Note: instances of this FS are always async, even if you never use it with any async
    backend.
    """

    protocol = "generic"  # there is no real reason to ever use a protocol with this FS

    def __init__(self, default_method="default", **kwargs):
        """

        Parameters
        ----------
        default_method: str (optional)
            Defines how to configure backend FS instances. Options are:
            - "default": instantiate like FSClass(), with no
              extra arguments; this is the default instance of that FS, and can be
              configured via the config system
            - "generic": takes instances from the `_generic_fs` dict in this module,
              which you must populate before use. Keys are by protocol
            - "current": takes the most recently instantiated version of each FS
        """
        self.method = default_method
        super().__init__(**kwargs)

    def _parent(self, path):
        # delegate to the backend, then re-attach the protocol prefix
        fs = _resolve_fs(path, self.method)
        return fs.unstrip_protocol(fs._parent(path))

    def _strip_protocol(self, path):
        # normalization only
        fs = _resolve_fs(path, self.method)
        return fs.unstrip_protocol(fs._strip_protocol(path))

    async def _find(self, path, maxdepth=None, withdirs=False, detail=False, **kwargs):
        # dispatch to async or blocking API depending on the backend
        fs = _resolve_fs(path, self.method)
        if fs.async_impl:
            out = await fs._find(
                path, maxdepth=maxdepth, withdirs=withdirs, detail=True, **kwargs
            )
        else:
            out = fs.find(
                path, maxdepth=maxdepth, withdirs=withdirs, detail=True, **kwargs
            )
        result = {}
        for k, v in out.items():
            v = v.copy()  # don't corrupt target FS dircache
            name = fs.unstrip_protocol(k)
            v["name"] = name
            result[name] = v
        if detail:
            return result
        return list(result)

    async def _info(self, url, **kwargs):
        fs = _resolve_fs(url, self.method)
        if fs.async_impl:
            out = await fs._info(url, **kwargs)
        else:
            out = fs.info(url, **kwargs)
        out = out.copy()  # don't edit originals
        # report names with the protocol prefix re-attached
        out["name"] = fs.unstrip_protocol(out["name"])
        return out

    async def _ls(
        self,
        url,
        detail=True,
        **kwargs,
    ):
        fs = _resolve_fs(url, self.method)
        if fs.async_impl:
            out = await fs._ls(url, detail=True, **kwargs)
        else:
            out = fs.ls(url, detail=True, **kwargs)
        out = [o.copy() for o in out]  # don't edit originals
        for o in out:
            o["name"] = fs.unstrip_protocol(o["name"])
        if detail:
            return out
        else:
            return [o["name"] for o in out]

    async def _cat_file(
        self,
        url,
        **kwargs,
    ):
        fs = _resolve_fs(url, self.method)
        if fs.async_impl:
            return await fs._cat_file(url, **kwargs)
        else:
            return fs.cat_file(url, **kwargs)

    async def _pipe_file(
        self,
        path,
        value,
        **kwargs,
    ):
        fs = _resolve_fs(path, self.method)
        if fs.async_impl:
            return await fs._pipe_file(path, value, **kwargs)
        else:
            return fs.pipe_file(path, value, **kwargs)

    async def _rm(self, url, **kwargs):
        # accept a single URL or a list; backend resolved from the first one
        urls = url
        if isinstance(urls, str):
            urls = [urls]
        fs = _resolve_fs(urls[0], self.method)
        if fs.async_impl:
            await fs._rm(urls, **kwargs)
        else:
            # NOTE(review): passes the original ``url`` (possibly a bare
            # string), not the normalized ``urls`` list — confirm intended
            fs.rm(url, **kwargs)

    async def _makedirs(self, path, exist_ok=False):
        logger.debug("Make dir %s", path)
        fs = _resolve_fs(path, self.method)
        if fs.async_impl:
            await fs._makedirs(path, exist_ok=exist_ok)
        else:
            fs.makedirs(path, exist_ok=exist_ok)

    def rsync(self, source, destination, **kwargs):
        """Sync files between two directory trees

        See `func:rsync` for more details.
        """
        rsync(source, destination, fs=self, **kwargs)

    async def _cp_file(
        self,
        url,
        url2,
        blocksize=2**20,
        callback=DEFAULT_CALLBACK,
        **kwargs,
    ):
        fs = _resolve_fs(url, self.method)
        fs2 = _resolve_fs(url2, self.method)
        if fs is fs2:
            # pure remote
            if fs.async_impl:
                return await fs._cp_file(url, url2, **kwargs)
            else:
                return fs.cp_file(url, url2, **kwargs)
        # cross-backend copy: stream block-by-block through this process
        kw = {"blocksize": 0, "cache_type": "none"}
        try:
            f1 = (
                await fs.open_async(url, "rb")
                if hasattr(fs, "open_async")
                else fs.open(url, "rb", **kw)
            )
            callback.set_size(await maybe_await(f1.size))
            f2 = (
                await fs2.open_async(url2, "wb")
                if hasattr(fs2, "open_async")
                else fs2.open(url2, "wb", **kw)
            )
            # when the source size is unknown, read until an empty chunk
            while f1.size is None or f2.tell() < f1.size:
                data = await maybe_await(f1.read(blocksize))
                if f1.size is None and not data:
                    break
                await maybe_await(f2.write(data))
                callback.absolute_update(f2.tell())
        finally:
            try:
                await maybe_await(f2.close())
                await maybe_await(f1.close())
            except NameError:
                # fail while opening f1 or f2
                # NOTE(review): if opening f2 fails after f1 was opened, the
                # NameError on f2 is swallowed here and f1 is never closed
                pass

    async def _make_many_dirs(self, urls, exist_ok=True):
        # backend resolved from the first URL; all URLs assumed same protocol
        fs = _resolve_fs(urls[0], self.method)
        if fs.async_impl:
            coros = [fs._makedirs(u, exist_ok=exist_ok) for u in urls]
            await _run_coros_in_chunks(coros)
        else:
            for u in urls:
                fs.makedirs(u, exist_ok=exist_ok)

    make_many_dirs = sync_wrapper(_make_many_dirs)

    async def _copy(
        self,
        path1: list[str],
        path2: list[str],
        recursive: bool = False,
        on_error: str = "ignore",
        maxdepth: Optional[int] = None,
        batch_size: Optional[int] = None,
        tempdir: Optional[str] = None,
        **kwargs,
    ):
        if recursive:
            raise NotImplementedError
        fs = _resolve_fs(path1[0], self.method)
        fs2 = _resolve_fs(path2[0], self.method)
        # not expanding paths atm., assume call is from rsync()
        if fs is fs2:
            # pure remote
            if fs.async_impl:
                return await fs._copy(path1, path2, **kwargs)
            else:
                return fs.copy(path1, path2, **kwargs)
        # cross-backend: stage each file through a local temporary directory
        await copy_file_op(
            fs, path1, fs2, path2, tempdir, batch_size, on_error=on_error
        )
364
+
365
+
366
async def copy_file_op(
    fs1, url1, fs2, url2, tempdir=None, batch_size=20, on_error="ignore"
):
    """Copy files pairwise from ``fs1`` to ``fs2`` via a local staging dir.

    Each pair is staged through a uniquely-named scratch file; the staging
    directory is removed afterwards, whether or not the copies succeed.
    """
    import tempfile

    staging = tempdir or tempfile.mkdtemp()
    try:
        tasks = []
        for src, dst in zip(url1, url2):
            scratch = os.path.join(staging, uuid.uuid4().hex)
            tasks.append(
                _copy_file_op(fs1, src, fs2, dst, scratch, on_error=on_error)
            )
        await _run_coros_in_chunks(tasks, batch_size=batch_size)
    finally:
        shutil.rmtree(staging)
387
+
388
+
389
async def _copy_file_op(fs1, url1, fs2, url2, local, on_error="ignore"):
    """Copy one file fs1:url1 -> fs2:url2 via the local scratch path."""
    # with on_error="raise", ``except ()`` matches nothing, so errors propagate
    ex = () if on_error == "raise" else Exception
    logger.debug("Copy %s -> %s", url1, url2)
    try:
        # download to local scratch, using the async API when available
        if fs1.async_impl:
            await fs1._get_file(url1, local)
        else:
            fs1.get_file(url1, local)
        # then upload from scratch to the destination
        if fs2.async_impl:
            await fs2._put_file(local, url2)
        else:
            fs2.put_file(local, url2)
        os.unlink(local)
        logger.debug("Copy %s -> %s; done", url1, url2)
    except ex as e:
        logger.debug("ignoring cp exception for %s: %s", url1, e)
405
+
406
+
407
async def maybe_await(cor):
    """Await *cor* if it is a coroutine, otherwise return it unchanged."""
    if not inspect.iscoroutine(cor):
        return cor
    return await cor
parrot/lib/python3.10/site-packages/fsspec/gui.py ADDED
@@ -0,0 +1,414 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import ast
2
+ import contextlib
3
+ import logging
4
+ import os
5
+ import re
6
+ from typing import ClassVar, Sequence
7
+
8
+ import panel as pn
9
+
10
+ from .core import OpenFile, get_filesystem_class, split_protocol
11
+ from .registry import known_implementations
12
+
13
+ pn.extension()
14
+ logger = logging.getLogger("fsspec.gui")
15
+
16
+
17
class SigSlot:
    """Signal-slot mixin, for Panel event passing

    Include this class in a widget manager's superclasses to be able to
    register events and callbacks on Panel widgets managed by that class.

    The method ``_register`` should be called as widgets are added, and external
    code should call ``connect`` to associate callbacks.

    By default, all signals emit a DEBUG logging statement.
    """

    # names of signals that this class may emit each of which must be
    # set by _register for any new instance
    signals: ClassVar[Sequence[str]] = []
    # names of actions that this class may respond to
    slots: ClassVar[Sequence[str]] = []

    # each of which must be a method name

    def __init__(self):
        # when True, _signal() becomes a no-op (see ignore_events())
        self._ignoring_events = False
        # signal name -> {"widget", "callbacks", "thing", "log"}
        self._sigs = {}
        # "<widget-name>-<attribute>" -> signal name, used by _signal()
        self._map = {}
        self._setup()

    def _setup(self):
        """Create GUI elements and register signals"""
        self.panel = pn.pane.PaneBase()
        # no signals to set up in the base class

    def _register(
        self, widget, name, thing="value", log_level=logging.DEBUG, auto=False
    ):
        """Watch the given attribute of a widget and assign it a named event

        This is normally called at the time a widget is instantiated, in the
        class which owns it.

        Parameters
        ----------
        widget : pn.layout.Panel or None
            Widget to watch. If None, an anonymous signal not associated with
            any widget.
        name : str
            Name of this event
        thing : str
            Attribute of the given widget to watch
        log_level : int
            When the signal is triggered, a logging event of the given level
            will be fired in the "fsspec.gui" logger.
        auto : bool
            If True, automatically connects with a method in this class of the
            same name.
        """
        if name not in self.signals:
            raise ValueError(f"Attempt to assign an undeclared signal: {name}")
        self._sigs[name] = {
            "widget": widget,
            "callbacks": [],
            "thing": thing,
            "log": log_level,
        }
        # key mirrors the "-".join([event.obj.name, event.name]) lookup in _signal
        wn = "-".join(
            [
                getattr(widget, "name", str(widget)) if widget is not None else "none",
                thing,
            ]
        )
        self._map[wn] = name
        if widget is not None:
            widget.param.watch(self._signal, thing, onlychanged=True)
        if auto and hasattr(self, name):
            self.connect(name, getattr(self, name))

    def _repr_mimebundle_(self, *args, **kwargs):
        """Display in a notebook or a server"""
        try:
            return self.panel._repr_mimebundle_(*args, **kwargs)
        except (ValueError, AttributeError):
            raise NotImplementedError("Panel does not seem to be set up properly")

    def connect(self, signal, slot):
        """Associate callback with given event

        The callback must be a function which takes the "new" value of the
        watched attribute as the only parameter. If the callback returns False,
        this cancels any further processing of the given event.

        Alternatively, the callback can be a string, in which case it means
        emitting the correspondingly-named event (i.e., connect to self)
        """
        self._sigs[signal]["callbacks"].append(slot)

    def _signal(self, event):
        """This is called by an action on a widget

        Within a self.ignore_events context, nothing happens.

        Tests can execute this method by directly changing the values of
        widget components.
        """
        if not self._ignoring_events:
            wn = "-".join([event.obj.name, event.name])
            if wn in self._map and self._map[wn] in self._sigs:
                self._emit(self._map[wn], event.new)

    @contextlib.contextmanager
    def ignore_events(self):
        """Temporarily turn off events processing in this instance

        (does not propagate to children)
        """
        self._ignoring_events = True
        try:
            yield
        finally:
            self._ignoring_events = False

    def _emit(self, sig, value=None):
        """An event happened, call its callbacks

        This method can be used in tests to simulate message passing without
        directly changing visual elements.

        Calling of callbacks will halt whenever one returns False.
        """
        logger.log(self._sigs[sig]["log"], f"{sig}: {value}")
        for callback in self._sigs[sig]["callbacks"]:
            if isinstance(callback, str):
                # string callback: re-emit the named signal (without a value)
                self._emit(callback)
            else:
                try:
                    # running callbacks should not break the interface
                    ret = callback(value)
                    if ret is False:
                        break
                except Exception as e:
                    logger.exception(
                        "Exception (%s) while executing callback for signal: %s",
                        e,
                        sig,
                    )

    def show(self, threads=False):
        """Open a new browser tab and display this instance's interface"""
        self.panel.show(threads=threads, verbose=False)
        return self
165
+
166
+
167
class SingleSelect(SigSlot):
    """A multiselect which only allows you to select one item for an event"""

    signals = ["_selected", "selected"]  # the first is internal
    slots = ["set_options", "set_selection", "add", "clear", "select"]

    def __init__(self, **kwargs):
        self.kwargs = kwargs
        super().__init__()

    def _setup(self):
        """Build the underlying MultiSelect widget and wire its events."""
        self.panel = pn.widgets.MultiSelect(**self.kwargs)
        self._register(self.panel, "_selected", "value")
        self._register(None, "selected")
        self.connect("_selected", self.select_one)

    def _signal(self, *args, **kwargs):
        super()._signal(*args, **kwargs)

    def select_one(self, *_):
        # Collapse any multi-selection to the most recently picked item,
        # suppressing the internal "_selected" event while doing so, then
        # announce the result via the public "selected" signal.
        with self.ignore_events():
            current = self.panel.value
            self.panel.value = [current[-1]] if current else []
        self._emit("selected", self.panel.value)

    def set_options(self, options):
        self.panel.options = options

    def clear(self):
        self.panel.options = []

    @property
    def value(self):
        """Currently selected item(s), as a list"""
        return self.panel.value

    def set_selection(self, selection):
        self.panel.value = [selection]
204
+
205
+
206
class FileSelector(SigSlot):
    """Panel-based graphical file selector widget

    Instances of this widget are interactive and can be displayed in jupyter by having
    them as the output of a cell, or in a separate browser tab using ``.show()``.
    """

    signals = [
        "protocol_changed",
        "selection_changed",
        "directory_entered",
        "home_clicked",
        "up_clicked",
        "go_clicked",
        "filters_changed",
    ]
    slots = ["set_filters", "go_home"]

    def __init__(self, url=None, filters=None, ignore=None, kwargs=None):
        """

        Parameters
        ----------
        url : str (optional)
            Initial value of the URL to populate the dialog; should include protocol
        filters : list(str) (optional)
            File endings to include in the listings. If not included, all files are
            allowed. Does not affect directories.
            If given, the endings will appear as checkboxes in the interface
        ignore : list(str) (optional)
            Regex(s) of file basename patterns to ignore, e.g., "\\." for typical
            hidden files on posix
        kwargs : dict (optional)
            To pass to file system instance
        """
        if url:
            self.init_protocol, url = split_protocol(url)
        else:
            # no URL given: default to the local filesystem and CWD
            self.init_protocol, url = "file", os.getcwd()
        self.init_url = url
        self.init_kwargs = (kwargs if isinstance(kwargs, str) else str(kwargs)) or "{}"
        self.filters = filters
        self.ignore = [re.compile(i) for i in ignore or []]
        # lazily created in the `fs` property; reset when protocol/kwargs change
        self._fs = None
        super().__init__()

    def _setup(self):
        self.url = pn.widgets.TextInput(
            name="url",
            value=self.init_url,
            align="end",
            sizing_mode="stretch_width",
            width_policy="max",
        )
        self.protocol = pn.widgets.Select(
            options=sorted(known_implementations),
            value=self.init_protocol,
            name="protocol",
            align="center",
        )
        self.kwargs = pn.widgets.TextInput(
            name="kwargs", value=self.init_kwargs, align="center"
        )
        self.go = pn.widgets.Button(name="⇨", align="end", width=45)
        self.main = SingleSelect(size=10)
        self.home = pn.widgets.Button(name="🏠", width=40, height=30, align="end")
        self.up = pn.widgets.Button(name="‹", width=30, height=30, align="end")

        # auto=True binds each signal to the method of the same name below
        self._register(self.protocol, "protocol_changed", auto=True)
        self._register(self.go, "go_clicked", "clicks", auto=True)
        self._register(self.up, "up_clicked", "clicks", auto=True)
        self._register(self.home, "home_clicked", "clicks", auto=True)
        self._register(None, "selection_changed")
        self.main.connect("selected", self.selection_changed)
        self._register(None, "directory_entered")
        # remember current protocol/kwargs so go_clicked can detect changes
        self.prev_protocol = self.protocol.value
        self.prev_kwargs = self.storage_options

        self.filter_sel = pn.widgets.CheckBoxGroup(
            value=[], options=[], inline=False, align="end", width_policy="min"
        )
        self._register(self.filter_sel, "filters_changed", auto=True)

        self.panel = pn.Column(
            pn.Row(self.protocol, self.kwargs),
            pn.Row(self.home, self.up, self.url, self.go, self.filter_sel),
            self.main.panel,
        )
        self.set_filters(self.filters)
        # populate the initial listing
        self.go_clicked()

    def set_filters(self, filters=None):
        """Set the file-ending filter checkboxes (all checked by default)"""
        self.filters = filters
        if filters:
            self.filter_sel.options = filters
            self.filter_sel.value = filters
        else:
            self.filter_sel.options = []
            self.filter_sel.value = []

    @property
    def storage_options(self):
        """Value of the kwargs box as a dictionary"""
        return ast.literal_eval(self.kwargs.value) or {}

    @property
    def fs(self):
        """Current filesystem instance"""
        if self._fs is None:
            cls = get_filesystem_class(self.protocol.value)
            self._fs = cls(**self.storage_options)
        return self._fs

    @property
    def urlpath(self):
        """URL of currently selected item"""
        return (
            (f"{self.protocol.value}://{self.main.value[0]}")
            if self.main.value
            else None
        )

    def open_file(self, mode="rb", compression=None, encoding=None):
        """Create OpenFile instance for the currently selected item

        For example, in a notebook you might do something like

        .. code-block::

            [ ]: sel = FileSelector(); sel

            # user selects their file

            [ ]: with sel.open_file('rb') as f:
            ...     out = f.read()

        Parameters
        ----------
        mode: str (optional)
            Open mode for the file.
        compression: str (optional)
            Interact with the file as compressed. Set to 'infer' to guess
            compression from the file ending
        encoding: str (optional)
            If using text mode, use this encoding; defaults to UTF8.

        Raises
        ------
        ValueError
            If no file is currently selected.
        """
        if self.urlpath is None:
            raise ValueError("No file selected")
        return OpenFile(self.fs, self.urlpath, mode, compression, encoding)

    def filters_changed(self, values):
        self.filters = values
        self.go_clicked()

    def selection_changed(self, *_):
        if self.urlpath is None:
            return
        # selecting a directory navigates into it rather than "choosing" it
        if self.fs.isdir(self.urlpath):
            self.url.value = self.fs._strip_protocol(self.urlpath)
            self.go_clicked()

    def go_clicked(self, *_):
        """Refresh the listing for the current URL, protocol and kwargs"""
        if (
            self.prev_protocol != self.protocol.value
            or self.prev_kwargs != self.storage_options
        ):
            self._fs = None  # causes fs to be recreated
            self.prev_protocol = self.protocol.value
            self.prev_kwargs = self.storage_options
        listing = sorted(
            self.fs.ls(self.url.value, detail=True), key=lambda x: x["name"]
        )
        # drop entries whose basename matches any of the ignore patterns
        listing = [
            l
            for l in listing
            if not any(i.match(l["name"].rsplit("/", 1)[-1]) for i in self.ignore)
        ]
        folders = {
            "📁 " + o["name"].rsplit("/", 1)[-1]: o["name"]
            for o in listing
            if o["type"] == "directory"
        }
        files = {
            "📄 " + o["name"].rsplit("/", 1)[-1]: o["name"]
            for o in listing
            if o["type"] == "file"
        }
        if self.filters:
            # file-ending filters apply to files only, never directories
            files = {
                k: v
                for k, v in files.items()
                if any(v.endswith(ext) for ext in self.filters)
            }
        self.main.set_options(dict(**folders, **files))

    def protocol_changed(self, *_):
        # new protocol invalidates the fs instance and current listing
        self._fs = None
        self.main.options = []
        self.url.value = ""

    def home_clicked(self, *_):
        """Reset protocol, kwargs and URL back to their initial values"""
        self.protocol.value = self.init_protocol
        self.kwargs.value = self.init_kwargs
        self.url.value = self.init_url
        self.go_clicked()

    def up_clicked(self, *_):
        """Navigate to the parent of the current URL"""
        self.url.value = self.fs._parent(self.url.value)
        self.go_clicked()
+ self.go_clicked()
parrot/lib/python3.10/site-packages/fsspec/implementations/cache_metadata.py ADDED
@@ -0,0 +1,232 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import os
4
+ import pickle
5
+ import time
6
+ from typing import TYPE_CHECKING
7
+
8
+ from fsspec.utils import atomic_write
9
+
10
+ try:
11
+ import ujson as json
12
+ except ImportError:
13
+ if not TYPE_CHECKING:
14
+ import json
15
+
16
+ if TYPE_CHECKING:
17
+ from typing import Any, Dict, Iterator, Literal
18
+
19
+ from typing_extensions import TypeAlias
20
+
21
+ from .cached import CachingFileSystem
22
+
23
+ Detail: TypeAlias = Dict[str, Any]
24
+
25
+
26
class CacheMetadata:
    """Cache metadata.

    All reading and writing of cache metadata is performed by this class,
    accessing the cached files and blocks is not.

    Metadata is stored in a single file per storage directory in JSON format.
    For backward compatibility, also reads metadata stored in pickle format
    which is converted to JSON when next saved.
    """

    def __init__(self, storage: list[str]):
        """

        Parameters
        ----------
        storage: list[str]
            Directories containing cached files, must be at least one. Metadata
            is stored in the last of these directories by convention.
        """
        if not storage:
            raise ValueError("CacheMetadata expects at least one storage location")

        self._storage = storage
        # one metadata dict per storage location, same order as self._storage
        self.cached_files: list[Detail] = [{}]

        # Private attribute to force saving of metadata in pickle format rather than
        # JSON for use in tests to confirm can read both pickle and JSON formats.
        self._force_save_pickle = False

    def _load(self, fn: str) -> Detail:
        """Low-level function to load metadata from specific file"""
        try:
            # preferred format: JSON
            with open(fn, "r") as f:
                loaded = json.load(f)
        except ValueError:
            # legacy format: pickle (converted to JSON on next save)
            with open(fn, "rb") as f:
                loaded = pickle.load(f)
        for c in loaded.values():
            # "blocks" is serialised as a list; restore the in-memory set
            if isinstance(c.get("blocks"), list):
                c["blocks"] = set(c["blocks"])
        return loaded

    def _save(self, metadata_to_save: Detail, fn: str) -> None:
        """Low-level function to save metadata to specific file"""
        if self._force_save_pickle:
            with atomic_write(fn) as f:
                pickle.dump(metadata_to_save, f)
        else:
            with atomic_write(fn, mode="w") as f:
                json.dump(metadata_to_save, f)

    def _scan_locations(
        self, writable_only: bool = False
    ) -> Iterator[tuple[str, str, bool]]:
        """Yield locations (filenames) where metadata is stored, and whether
        writable or not.

        Parameters
        ----------
        writable_only: bool
            Set to True to only yield writable locations.

        Returns
        -------
        Yields (str, str, bool)
        """
        n = len(self._storage)
        for i, storage in enumerate(self._storage):
            # by convention only the last storage location is writable
            writable = i == n - 1
            if writable_only and not writable:
                continue
            yield os.path.join(storage, "cache"), storage, writable

    def check_file(
        self, path: str, cfs: CachingFileSystem | None
    ) -> Literal[False] | tuple[Detail, str]:
        """If path is in cache return its details, otherwise return ``False``.

        If the optional CachingFileSystem is specified then it is used to
        perform extra checks to reject possible matches, such as if they are
        too old.
        """
        for (fn, base, _), cache in zip(self._scan_locations(), self.cached_files):
            if path not in cache:
                continue
            detail = cache[path].copy()

            if cfs is not None:
                if cfs.check_files and detail["uid"] != cfs.fs.ukey(path):
                    # Wrong file as determined by hash of file properties
                    continue
                if cfs.expiry and time.time() - detail["time"] > cfs.expiry:
                    # Cached file has expired
                    continue

            fn = os.path.join(base, detail["fn"])
            if os.path.exists(fn):
                return detail, fn
        return False

    def clear_expired(self, expiry_time: int) -> tuple[list[str], bool]:
        """Remove expired metadata from the cache.

        Returns names of files corresponding to expired metadata and a boolean
        flag indicating whether the writable cache is empty. Caller is
        responsible for deleting the expired files.
        """
        expired_files = []
        # iterate over a copy so entries can be popped during the scan
        for path, detail in self.cached_files[-1].copy().items():
            if time.time() - detail["time"] > expiry_time:
                fn = detail.get("fn", "")
                if not fn:
                    raise RuntimeError(
                        f"Cache metadata does not contain 'fn' for {path}"
                    )
                fn = os.path.join(self._storage[-1], fn)
                expired_files.append(fn)
                self.cached_files[-1].pop(path)

        if self.cached_files[-1]:
            cache_path = os.path.join(self._storage[-1], "cache")
            self._save(self.cached_files[-1], cache_path)

        writable_cache_empty = not self.cached_files[-1]
        return expired_files, writable_cache_empty

    def load(self) -> None:
        """Load all metadata from disk and store in ``self.cached_files``"""
        cached_files = []
        for fn, _, _ in self._scan_locations():
            if os.path.exists(fn):
                # TODO: consolidate blocks here
                cached_files.append(self._load(fn))
            else:
                cached_files.append({})
        self.cached_files = cached_files or [{}]

    def on_close_cached_file(self, f: Any, path: str) -> None:
        """Perform side-effect actions on closing a cached file.

        The actual closing of the file is the responsibility of the caller.
        """
        # File must be writable, so in self.cached_files[-1]
        c = self.cached_files[-1][path]
        # if every block has been fetched, mark the whole file as cached
        if c["blocks"] is not True and len(c["blocks"]) * f.blocksize >= f.size:
            c["blocks"] = True

    def pop_file(self, path: str) -> str | None:
        """Remove metadata of cached file.

        If path is in the cache, return the filename of the cached file,
        otherwise return ``None``. Caller is responsible for deleting the
        cached file.
        """
        details = self.check_file(path, None)
        if not details:
            return None
        _, fn = details
        if fn.startswith(self._storage[-1]):
            self.cached_files[-1].pop(path)
            self.save()
        else:
            raise PermissionError(
                "Can only delete cached file in last, writable cache location"
            )
        return fn

    def save(self) -> None:
        """Save metadata to disk"""
        for (fn, _, writable), cache in zip(self._scan_locations(), self.cached_files):
            if not writable:
                continue

            if os.path.exists(fn):
                # merge with what is already on disk (written by other processes)
                cached_files = self._load(fn)
                for k, c in cached_files.items():
                    if k in cache:
                        if c["blocks"] is True or cache[k]["blocks"] is True:
                            c["blocks"] = True
                        else:
                            # self.cached_files[*][*]["blocks"] must continue to
                            # point to the same set object so that updates
                            # performed by MMapCache are propagated back to
                            # self.cached_files.
                            blocks = cache[k]["blocks"]
                            blocks.update(c["blocks"])
                            c["blocks"] = blocks
                        c["time"] = max(c["time"], cache[k]["time"])
                        c["uid"] = cache[k]["uid"]

                # Files can be added to cache after it was written once
                for k, c in cache.items():
                    if k not in cached_files:
                        cached_files[k] = c
            else:
                cached_files = cache
            # serialise a copy with block sets converted to JSON-able lists
            cache = {k: v.copy() for k, v in cached_files.items()}
            for c in cache.values():
                if isinstance(c["blocks"], set):
                    c["blocks"] = list(c["blocks"])
            self._save(cache, fn)
            self.cached_files[-1] = cached_files

    def update_file(self, path: str, detail: Detail) -> None:
        """Update metadata for specific file in memory, do not save"""
        self.cached_files[-1][path] = detail
parrot/lib/python3.10/site-packages/fsspec/implementations/dask.py ADDED
@@ -0,0 +1,152 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import dask
2
+ from distributed.client import Client, _get_global_client
3
+ from distributed.worker import Worker
4
+
5
+ from fsspec import filesystem
6
+ from fsspec.spec import AbstractBufferedFile, AbstractFileSystem
7
+ from fsspec.utils import infer_storage_options
8
+
9
+
10
+ def _get_client(client):
11
+ if client is None:
12
+ return _get_global_client()
13
+ elif isinstance(client, Client):
14
+ return client
15
+ else:
16
+ # e.g., connection string
17
+ return Client(client)
18
+
19
+
20
+ def _in_worker():
21
+ return bool(Worker._instances)
22
+
23
+
24
+ class DaskWorkerFileSystem(AbstractFileSystem):
25
+ """View files accessible to a worker as any other remote file-system
26
+
27
+ When instances are run on the worker, uses the real filesystem. When
28
+ run on the client, they call the worker to provide information or data.
29
+
30
+ **Warning** this implementation is experimental, and read-only for now.
31
+ """
32
+
33
+ def __init__(
34
+ self, target_protocol=None, target_options=None, fs=None, client=None, **kwargs
35
+ ):
36
+ super().__init__(**kwargs)
37
+ if not (fs is None) ^ (target_protocol is None):
38
+ raise ValueError(
39
+ "Please provide one of filesystem instance (fs) or"
40
+ " target_protocol, not both"
41
+ )
42
+ self.target_protocol = target_protocol
43
+ self.target_options = target_options
44
+ self.worker = None
45
+ self.client = client
46
+ self.fs = fs
47
+ self._determine_worker()
48
+
49
+ @staticmethod
50
+ def _get_kwargs_from_urls(path):
51
+ so = infer_storage_options(path)
52
+ if "host" in so and "port" in so:
53
+ return {"client": f"{so['host']}:{so['port']}"}
54
+ else:
55
+ return {}
56
+
57
+ def _determine_worker(self):
58
+ if _in_worker():
59
+ self.worker = True
60
+ if self.fs is None:
61
+ self.fs = filesystem(
62
+ self.target_protocol, **(self.target_options or {})
63
+ )
64
+ else:
65
+ self.worker = False
66
+ self.client = _get_client(self.client)
67
+ self.rfs = dask.delayed(self)
68
+
69
+ def mkdir(self, *args, **kwargs):
70
+ if self.worker:
71
+ self.fs.mkdir(*args, **kwargs)
72
+ else:
73
+ self.rfs.mkdir(*args, **kwargs).compute()
74
+
75
+ def rm(self, *args, **kwargs):
76
+ if self.worker:
77
+ self.fs.rm(*args, **kwargs)
78
+ else:
79
+ self.rfs.rm(*args, **kwargs).compute()
80
+
81
+ def copy(self, *args, **kwargs):
82
+ if self.worker:
83
+ self.fs.copy(*args, **kwargs)
84
+ else:
85
+ self.rfs.copy(*args, **kwargs).compute()
86
+
87
+ def mv(self, *args, **kwargs):
88
+ if self.worker:
89
+ self.fs.mv(*args, **kwargs)
90
+ else:
91
+ self.rfs.mv(*args, **kwargs).compute()
92
+
93
+ def ls(self, *args, **kwargs):
94
+ if self.worker:
95
+ return self.fs.ls(*args, **kwargs)
96
+ else:
97
+ return self.rfs.ls(*args, **kwargs).compute()
98
+
99
+ def _open(
100
+ self,
101
+ path,
102
+ mode="rb",
103
+ block_size=None,
104
+ autocommit=True,
105
+ cache_options=None,
106
+ **kwargs,
107
+ ):
108
+ if self.worker:
109
+ return self.fs._open(
110
+ path,
111
+ mode=mode,
112
+ block_size=block_size,
113
+ autocommit=autocommit,
114
+ cache_options=cache_options,
115
+ **kwargs,
116
+ )
117
+ else:
118
+ return DaskFile(
119
+ fs=self,
120
+ path=path,
121
+ mode=mode,
122
+ block_size=block_size,
123
+ autocommit=autocommit,
124
+ cache_options=cache_options,
125
+ **kwargs,
126
+ )
127
+
128
+ def fetch_range(self, path, mode, start, end):
129
+ if self.worker:
130
+ with self._open(path, mode) as f:
131
+ f.seek(start)
132
+ return f.read(end - start)
133
+ else:
134
+ return self.rfs.fetch_range(path, mode, start, end).compute()
135
+
136
+
137
+ class DaskFile(AbstractBufferedFile):
138
+ def __init__(self, mode="rb", **kwargs):
139
+ if mode != "rb":
140
+ raise ValueError('Remote dask files can only be opened in "rb" mode')
141
+ super().__init__(**kwargs)
142
+
143
+ def _upload_chunk(self, final=False):
144
+ pass
145
+
146
+ def _initiate_upload(self):
147
+ """Create remote file/upload"""
148
+ pass
149
+
150
+ def _fetch_range(self, start, end):
151
+ """Get the specified set of bytes from remote"""
152
+ return self.fs.fetch_range(self.path, self.mode, start, end)
parrot/lib/python3.10/site-packages/fsspec/implementations/jupyter.py ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import base64
2
+ import io
3
+ import re
4
+
5
+ import requests
6
+
7
+ import fsspec
8
+
9
+
10
class JupyterFileSystem(fsspec.AbstractFileSystem):
    """View of the files as seen by a Jupyter server (notebook or lab)"""

    protocol = ("jupyter", "jlab")

    def __init__(self, url, tok=None, **kwargs):
        """

        Parameters
        ----------
        url : str
            Base URL of the server, like "http://127.0.0.1:8888". May include
            token in the string, which is given by the process when starting up
        tok : str
            If the token is obtained separately, can be given here
        kwargs
        """
        if "?" in url:
            # extract the token from the query string if not supplied directly
            if tok is None:
                try:
                    tok = re.findall("token=([a-z0-9]+)", url)[0]
                except IndexError as e:
                    raise ValueError("Could not determine token") from e
            url = url.split("?", 1)[0]
        self.url = url.rstrip("/") + "/api/contents"
        self.session = requests.Session()
        if tok:
            self.session.headers["Authorization"] = f"token {tok}"

        super().__init__(**kwargs)

    def ls(self, path, detail=True, **kwargs):
        """List objects at ``path`` via the server's contents API.

        Raises
        ------
        FileNotFoundError
            If the server reports 404 for the path.
        """
        path = self._strip_protocol(path)
        r = self.session.get(f"{self.url}/{path}")
        if r.status_code == 404:
            # bug fix: previously this *returned* the exception instance,
            # which is truthy and silently broke callers such as exists()
            raise FileNotFoundError(path)
        r.raise_for_status()
        out = r.json()

        if out["type"] == "directory":
            out = out["content"]
        else:
            out = [out]
        for o in out:
            # normalise to fsspec's listing schema
            o["name"] = o.pop("path")
            o.pop("content")
            if o["type"] == "notebook":
                o["type"] = "file"
        if detail:
            return out
        return [o["name"] for o in out]

    def cat_file(self, path, start=None, end=None, **kwargs):
        """Return (a slice of) the bytes of ``path``.

        Raises
        ------
        FileNotFoundError
            If the server reports 404 for the path.
        """
        path = self._strip_protocol(path)
        r = self.session.get(f"{self.url}/{path}")
        if r.status_code == 404:
            # bug fix: raise rather than return the exception instance
            raise FileNotFoundError(path)
        r.raise_for_status()
        out = r.json()
        if out["format"] == "text":
            # data should be binary
            b = out["content"].encode()
        else:
            b = base64.b64decode(out["content"])
        return b[start:end]

    def pipe_file(self, path, value, **_):
        """Upload the bytes ``value`` as the full contents of ``path``"""
        path = self._strip_protocol(path)
        json = {
            "name": path.rsplit("/", 1)[-1],
            "path": path,
            "size": len(value),
            "content": base64.b64encode(value).decode(),
            "format": "base64",
            "type": "file",
        }
        self.session.put(f"{self.url}/{path}", json=json)

    def mkdir(self, path, create_parents=True, **kwargs):
        path = self._strip_protocol(path)
        if create_parents and "/" in path:
            # recursively create missing ancestors first
            self.mkdir(path.rsplit("/", 1)[0], True)
        json = {
            "name": path.rsplit("/", 1)[-1],
            "path": path,
            "size": None,
            "content": None,
            "type": "directory",
        }
        self.session.put(f"{self.url}/{path}", json=json)

    def _rm(self, path):
        path = self._strip_protocol(path)
        self.session.delete(f"{self.url}/{path}")

    def _open(self, path, mode="rb", **kwargs):
        path = self._strip_protocol(path)
        if mode == "rb":
            # reads are served whole, from memory
            data = self.cat_file(path)
            return io.BytesIO(data)
        else:
            return SimpleFileWriter(self, path, mode="wb")
+ return SimpleFileWriter(self, path, mode="wb")
112
+
113
+
114
class SimpleFileWriter(fsspec.spec.AbstractBufferedFile):
    """Buffered writer that uploads the whole file in one request on close."""

    def _upload_chunk(self, final=False):
        """Never uploads a chunk until file is done

        Not suitable for large files
        """
        if final is False:
            return False
        # flush everything accumulated in the buffer as a single upload
        self.buffer.seek(0)
        payload = self.buffer.read()
        self.fs.pipe_file(self.path, payload)
parrot/lib/python3.10/site-packages/fsspec/implementations/memory.py ADDED
@@ -0,0 +1,303 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import logging
4
+ from datetime import datetime, timezone
5
+ from errno import ENOTEMPTY
6
+ from io import BytesIO
7
+ from pathlib import PurePath, PureWindowsPath
8
+ from typing import Any, ClassVar
9
+
10
+ from fsspec import AbstractFileSystem
11
+ from fsspec.implementations.local import LocalFileSystem
12
+ from fsspec.utils import stringify_path
13
+
14
+ logger = logging.getLogger("fsspec.memoryfs")
15
+
16
+
17
class MemoryFileSystem(AbstractFileSystem):
    """A filesystem based on a dict of BytesIO objects

    This is a global filesystem so instances of this class all point to the same
    in memory filesystem.
    """

    # Both of these are class-level and therefore shared by ALL instances.
    store: ClassVar[dict[str, Any]] = {}  # global, do not overwrite!
    pseudo_dirs = [""]  # global, do not overwrite!
    protocol = "memory"
    root_marker = "/"

    @classmethod
    def _strip_protocol(cls, path):
        # Normalise any input to the canonical "/a/b" form used as store keys
        # (root becomes "").
        if isinstance(path, PurePath):
            if isinstance(path, PureWindowsPath):
                # delegate Windows-style normalisation to the local FS
                return LocalFileSystem._strip_protocol(path)
            else:
                path = stringify_path(path)

        if path.startswith("memory://"):
            path = path[len("memory://") :]
        if "::" in path or "://" in path:
            # chained/compound URL: leave it for the outer machinery to parse
            return path.rstrip("/")
        path = path.lstrip("/").rstrip("/")
        return "/" + path if path else ""

    def ls(self, path, detail=True, **kwargs):
        """List a single file, or the direct children of a directory.

        Directories may be explicit (``pseudo_dirs``) or merely implied by
        deeper keys in ``store``.
        """
        path = self._strip_protocol(path)
        if path in self.store:
            # there is a key with this exact name
            if not detail:
                return [path]
            return [
                {
                    "name": path,
                    "size": self.store[path].size,
                    "type": "file",
                    "created": self.store[path].created.timestamp(),
                }
            ]
        paths = set()
        starter = path + "/"
        out = []
        # files: exact children plus directories implied by deeper keys
        for p2 in tuple(self.store):
            if p2.startswith(starter):
                if "/" not in p2[len(starter) :]:
                    # exact child
                    out.append(
                        {
                            "name": p2,
                            "size": self.store[p2].size,
                            "type": "file",
                            "created": self.store[p2].created.timestamp(),
                        }
                    )
                elif len(p2) > len(starter):
                    # implied child directory
                    ppath = starter + p2[len(starter) :].split("/", 1)[0]
                    if ppath not in paths:
                        out = out or []
                        out.append(
                            {
                                "name": ppath,
                                "size": 0,
                                "type": "directory",
                            }
                        )
                        paths.add(ppath)
        # explicit (possibly empty) directories
        for p2 in self.pseudo_dirs:
            if p2.startswith(starter):
                if "/" not in p2[len(starter) :]:
                    # exact child pdir
                    if p2 not in paths:
                        out.append({"name": p2, "size": 0, "type": "directory"})
                        paths.add(p2)
                else:
                    # directory implied by deeper pdir
                    ppath = starter + p2[len(starter) :].split("/", 1)[0]
                    if ppath not in paths:
                        out.append({"name": ppath, "size": 0, "type": "directory"})
                        paths.add(ppath)
        if not out:
            if path in self.pseudo_dirs:
                # empty dir
                return []
            raise FileNotFoundError(path)
        if detail:
            return out
        return sorted([f["name"] for f in out])

    def mkdir(self, path, create_parents=True, **kwargs):
        """Create an explicit directory entry; optionally create parents.

        Raises FileExistsError if the path already exists as a file or
        directory, and NotADirectoryError if the parent is a file.
        """
        path = self._strip_protocol(path)
        if path in self.store or path in self.pseudo_dirs:
            raise FileExistsError(path)
        if self._parent(path).strip("/") and self.isfile(self._parent(path)):
            raise NotADirectoryError(self._parent(path))
        if create_parents and self._parent(path).strip("/"):
            try:
                self.mkdir(self._parent(path), create_parents, **kwargs)
            except FileExistsError:
                # parent already present — that's fine when recursing
                pass
        if path and path not in self.pseudo_dirs:
            self.pseudo_dirs.append(path)

    def makedirs(self, path, exist_ok=False):
        """Recursive mkdir; swallow FileExistsError when ``exist_ok``."""
        try:
            self.mkdir(path, create_parents=True)
        except FileExistsError:
            if not exist_ok:
                raise

    def pipe_file(self, path, value, **kwargs):
        """Set the bytes of given file

        Avoids copies of the data if possible
        """
        # open in write mode with the data attached commits immediately
        self.open(path, "wb", data=value)

    def rmdir(self, path):
        """Remove an explicit directory; it must exist and be empty."""
        path = self._strip_protocol(path)
        if path == "":
            # silently avoid deleting FS root
            return
        if path in self.pseudo_dirs:
            if not self.ls(path):
                self.pseudo_dirs.remove(path)
            else:
                raise OSError(ENOTEMPTY, "Directory not empty", path)
        else:
            raise FileNotFoundError(path)

    def info(self, path, **kwargs):
        """Return details for one path: a file entry, or a directory that is
        either explicit or implied by deeper entries."""
        logger.debug("info: %s", path)
        path = self._strip_protocol(path)
        if path in self.pseudo_dirs or any(
            p.startswith(path + "/") for p in list(self.store) + self.pseudo_dirs
        ):
            return {
                "name": path,
                "size": 0,
                "type": "directory",
            }
        elif path in self.store:
            filelike = self.store[path]
            return {
                "name": path,
                "size": filelike.size,
                "type": "file",
                "created": getattr(filelike, "created", None),
            }
        else:
            raise FileNotFoundError(path)

    def _open(
        self,
        path,
        mode="rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        # Supported modes: "rb" / "ab" / "r+b" return the live MemoryFile;
        # "wb" creates a fresh one (committed immediately unless inside a
        # transaction).
        path = self._strip_protocol(path)
        if path in self.pseudo_dirs:
            raise IsADirectoryError(path)
        parent = path
        # refuse to nest a file under an existing *file* anywhere up the tree
        while len(parent) > 1:
            parent = self._parent(parent)
            if self.isfile(parent):
                raise FileExistsError(parent)
        if mode in ["rb", "ab", "r+b"]:
            if path in self.store:
                f = self.store[path]
                if mode == "ab":
                    # position at the end of file
                    f.seek(0, 2)
                else:
                    # position at the beginning of file
                    f.seek(0)
                return f
            else:
                raise FileNotFoundError(path)
        elif mode == "wb":
            m = MemoryFile(self, path, kwargs.get("data"))
            if not self._intrans:
                m.commit()
            return m
        else:
            name = self.__class__.__name__
            raise ValueError(f"unsupported file mode for {name}: {mode!r}")

    def cp_file(self, path1, path2, **kwargs):
        """Copy a file's bytes, or register a directory copy."""
        path1 = self._strip_protocol(path1)
        path2 = self._strip_protocol(path2)
        if self.isfile(path1):
            self.store[path2] = MemoryFile(
                self, path2, self.store[path1].getvalue()
            )  # implicit copy
        elif self.isdir(path1):
            if path2 not in self.pseudo_dirs:
                self.pseudo_dirs.append(path2)
        else:
            raise FileNotFoundError(path1)

    def cat_file(self, path, start=None, end=None, **kwargs):
        """Return (a slice of) the stored bytes without copying the buffer."""
        logger.debug("cat: %s", path)
        path = self._strip_protocol(path)
        try:
            return bytes(self.store[path].getbuffer()[start:end])
        except KeyError:
            raise FileNotFoundError(path)

    def _rm(self, path):
        # remove a single file entry; KeyError maps to FileNotFoundError
        path = self._strip_protocol(path)
        try:
            del self.store[path]
        except KeyError as e:
            raise FileNotFoundError(path) from e

    def modified(self, path):
        """Timestamp of the last commit for a stored file."""
        path = self._strip_protocol(path)
        try:
            return self.store[path].modified
        except KeyError:
            raise FileNotFoundError(path)

    def created(self, path):
        """Timestamp of when the stored file object was constructed."""
        path = self._strip_protocol(path)
        try:
            return self.store[path].created
        except KeyError:
            raise FileNotFoundError(path)

    def rm(self, path, recursive=False, maxdepth=None):
        """Remove files/directories; deepest entries first."""
        if isinstance(path, str):
            path = self._strip_protocol(path)
        else:
            path = [self._strip_protocol(p) for p in path]
        paths = self.expand_path(path, recursive=recursive, maxdepth=maxdepth)
        for p in reversed(paths):
            # If the expanded path doesn't exist, it is only because the expanded
            # path was a directory that does not exist in self.pseudo_dirs. This
            # is possible if you directly create files without making the
            # directories first.
            if not self.exists(p):
                continue
            if self.isfile(p):
                self.rm_file(p)
            else:
                self.rmdir(p)
268
+
269
+
270
class MemoryFile(BytesIO):
    """A BytesIO which can't close and works as a context manager

    Can initialise with data. Each path should only be active once at any moment.

    No need to provide fs, path if auto-committing (default)
    """

    def __init__(self, fs=None, path=None, data=None):
        logger.debug("open file %s", path)
        self.fs = fs
        self.path = path
        self.created = datetime.now(tz=timezone.utc)
        self.modified = datetime.now(tz=timezone.utc)
        if data:
            # seed the underlying buffer and rewind for readers
            super().__init__(data)
            self.seek(0)

    @property
    def size(self):
        # current buffer length in bytes, without copying
        return self.getbuffer().nbytes

    def __enter__(self):
        return self

    def close(self):
        # deliberately a no-op: the same object stays usable in the store
        pass

    def discard(self):
        # transaction rollback: nothing to undo until commit()
        pass

    def commit(self):
        # publish this buffer into the filesystem and stamp it
        self.fs.store[self.path] = self
        self.modified = datetime.now(tz=timezone.utc)
parrot/lib/python3.10/site-packages/fsspec/implementations/smb.py ADDED
@@ -0,0 +1,343 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This module contains SMBFileSystem class responsible for handling access to
3
+ Windows Samba network shares by using package smbprotocol
4
+ """
5
+
6
+ import datetime
7
+ import uuid
8
+ from stat import S_ISDIR, S_ISLNK
9
+
10
+ import smbclient
11
+
12
+ from .. import AbstractFileSystem
13
+ from ..utils import infer_storage_options
14
+
15
+ # ! pylint: disable=bad-continuation
16
+
17
+
18
class SMBFileSystem(AbstractFileSystem):
    """Allow reading and writing to Windows and Samba network shares.

    When using `fsspec.open()` for getting a file-like object the URI
    should be specified as this format:
    ``smb://workgroup;user:password@server:port/share/folder/file.csv``.

    Example::

        >>> import fsspec
        >>> with fsspec.open(
        ...     'smb://myuser:mypassword@myserver.com/' 'share/folder/file.csv'
        ... ) as smbfile:
        ...     df = pd.read_csv(smbfile, sep='|', header=None)

    Note that you need to pass in a valid hostname or IP address for the host
    component of the URL. Do not use the Windows/NetBIOS machine name for the
    host component.

    The first component of the path in the URL points to the name of the shared
    folder. Subsequent path components will point to the directory/folder/file.

    The URL components ``workgroup`` , ``user``, ``password`` and ``port`` may be
    optional.

    .. note::

        For working this source require `smbprotocol`_ to be installed, e.g.::

          $ pip install smbprotocol
          # or
          # pip install smbprotocol[kerberos]

    .. _smbprotocol: https://github.com/jborean93/smbprotocol#requirements

    Note: if using this with the ``open`` or ``open_files``, with full URLs,
    there is no way to tell if a path is relative, so all paths are assumed
    to be absolute.
    """

    protocol = "smb"

    # pylint: disable=too-many-arguments
    def __init__(
        self,
        host,
        port=None,
        username=None,
        password=None,
        timeout=60,
        encrypt=None,
        share_access=None,
        register_session_retries=5,
        auto_mkdir=False,
        **kwargs,
    ):
        """
        You can use _get_kwargs_from_urls to get some kwargs from
        a reasonable SMB url.

        Authentication will be anonymous or integrated if username/password are not
        given.

        Parameters
        ----------
        host: str
            The remote server name/ip to connect to
        port: int or None
            Port to connect with. Usually 445, sometimes 139.
        username: str or None
            Username to connect with. Required if Kerberos auth is not being used.
        password: str or None
            User's password on the server, if using username
        timeout: int
            Connection timeout in seconds
        encrypt: bool
            Whether to force encryption or not, once this has been set to True
            the session cannot be changed back to False.
        share_access: str or None
            Specifies the default access applied to file open operations
            performed with this file system object.
            This affects whether other processes can concurrently open a handle
            to the same file.

            - None (the default): exclusively locks the file until closed.
            - 'r': Allow other handles to be opened with read access.
            - 'w': Allow other handles to be opened with write access.
            - 'd': Allow other handles to be opened with delete access.
        auto_mkdir: bool
            Whether, when opening a file, the directory containing it should
            be created (if it doesn't already exist). This is assumed by pyarrow
            and zarr-python code.
        """
        super().__init__(**kwargs)
        self.host = host
        self.port = port
        self.username = username
        self.password = password
        self.timeout = timeout
        self.encrypt = encrypt
        # prefix (within a share) for temporary files used by SMBFileOpener
        self.temppath = kwargs.pop("temppath", "")
        self.share_access = share_access
        self.register_session_retries = register_session_retries
        self.auto_mkdir = auto_mkdir
        self._connect()

    @property
    def _port(self):
        # default SMB port when none was given
        return 445 if self.port is None else self.port

    def _connect(self):
        import time

        # NOTE(review): if every attempt fails, the loop exits without
        # raising, leaving the instance unconnected — TODO confirm whether
        # the final exception should propagate instead.
        for _ in range(self.register_session_retries):
            try:
                smbclient.register_session(
                    self.host,
                    username=self.username,
                    password=self.password,
                    port=self._port,
                    encrypt=self.encrypt,
                    connection_timeout=self.timeout,
                )
                break
            except Exception:
                # brief back-off before retrying
                time.sleep(0.1)

    @classmethod
    def _strip_protocol(cls, path):
        # keep only the path component of an smb:// URL
        return infer_storage_options(path)["path"]

    @staticmethod
    def _get_kwargs_from_urls(path):
        # smb://workgroup;user:password@host:port/share/folder/file.csv
        out = infer_storage_options(path)
        out.pop("path", None)
        out.pop("protocol", None)
        return out

    def mkdir(self, path, create_parents=True, **kwargs):
        """Create a directory on the share (optionally with parents)."""
        wpath = _as_unc_path(self.host, path)
        if create_parents:
            smbclient.makedirs(wpath, exist_ok=False, port=self._port, **kwargs)
        else:
            smbclient.mkdir(wpath, port=self._port, **kwargs)

    def makedirs(self, path, exist_ok=False):
        # only meaningful below a share root; a bare "/share" is a no-op
        if _share_has_path(path):
            wpath = _as_unc_path(self.host, path)
            smbclient.makedirs(wpath, exist_ok=exist_ok, port=self._port)

    def rmdir(self, path):
        # share roots themselves cannot be removed; silently skipped
        if _share_has_path(path):
            wpath = _as_unc_path(self.host, path)
            smbclient.rmdir(wpath, port=self._port)

    def info(self, path, **kwargs):
        """stat() the path and translate to an fsspec info dict."""
        wpath = _as_unc_path(self.host, path)
        stats = smbclient.stat(wpath, port=self._port, **kwargs)
        if S_ISDIR(stats.st_mode):
            stype = "directory"
        elif S_ISLNK(stats.st_mode):
            stype = "link"
        else:
            stype = "file"
        res = {
            # directories get a trailing slash in their reported name
            "name": path + "/" if stype == "directory" else path,
            "size": stats.st_size,
            "type": stype,
            "uid": stats.st_uid,
            "gid": stats.st_gid,
            "time": stats.st_atime,
            "mtime": stats.st_mtime,
        }
        return res

    def created(self, path):
        """Return the created timestamp of a file as a datetime.datetime"""
        wpath = _as_unc_path(self.host, path)
        stats = smbclient.stat(wpath, port=self._port)
        return datetime.datetime.fromtimestamp(stats.st_ctime, tz=datetime.timezone.utc)

    def modified(self, path):
        """Return the modified timestamp of a file as a datetime.datetime"""
        wpath = _as_unc_path(self.host, path)
        stats = smbclient.stat(wpath, port=self._port)
        return datetime.datetime.fromtimestamp(stats.st_mtime, tz=datetime.timezone.utc)

    def ls(self, path, detail=True, **kwargs):
        """List directory contents; full info dicts when ``detail``."""
        unc = _as_unc_path(self.host, path)
        listed = smbclient.listdir(unc, port=self._port, **kwargs)
        dirs = ["/".join([path.rstrip("/"), p]) for p in listed]
        if detail:
            # one extra stat() round-trip per entry
            dirs = [self.info(d) for d in dirs]
        return dirs

    # pylint: disable=too-many-arguments
    def _open(
        self,
        path,
        mode="rb",
        block_size=-1,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        """
        block_size: int or None
            If 0, no buffering, 1, line buffering, >1, buffer that many bytes

        Notes
        -----
        By specifying 'share_access' in 'kwargs' it is possible to override the
        default shared access setting applied in the constructor of this object.
        """
        if self.auto_mkdir and "w" in mode:
            self.makedirs(self._parent(path), exist_ok=True)
        bls = block_size if block_size is not None and block_size >= 0 else -1
        wpath = _as_unc_path(self.host, path)
        share_access = kwargs.pop("share_access", self.share_access)
        if "w" in mode and autocommit is False:
            # transactional write: stage into a temp file, move on commit
            temp = _as_temp_path(self.host, path, self.temppath)
            return SMBFileOpener(
                wpath, temp, mode, port=self._port, block_size=bls, **kwargs
            )
        return smbclient.open_file(
            wpath,
            mode,
            buffering=bls,
            share_access=share_access,
            port=self._port,
            **kwargs,
        )

    def copy(self, path1, path2, **kwargs):
        """Copy within two locations in the same filesystem"""
        wpath1 = _as_unc_path(self.host, path1)
        wpath2 = _as_unc_path(self.host, path2)
        if self.auto_mkdir:
            self.makedirs(self._parent(path2), exist_ok=True)
        smbclient.copyfile(wpath1, wpath2, port=self._port, **kwargs)

    def _rm(self, path):
        # dispatch on entry type: directories via rmdir, files via remove
        if _share_has_path(path):
            wpath = _as_unc_path(self.host, path)
            stats = smbclient.stat(wpath, port=self._port)
            if S_ISDIR(stats.st_mode):
                smbclient.rmdir(wpath, port=self._port)
            else:
                smbclient.remove(wpath, port=self._port)

    def mv(self, path1, path2, recursive=None, maxdepth=None, **kwargs):
        """Rename/move a path on the server."""
        wpath1 = _as_unc_path(self.host, path1)
        wpath2 = _as_unc_path(self.host, path2)
        smbclient.rename(wpath1, wpath2, port=self._port, **kwargs)
273
+
274
+
275
+ def _as_unc_path(host, path):
276
+ rpath = path.replace("/", "\\")
277
+ unc = f"\\\\{host}{rpath}"
278
+ return unc
279
+
280
+
281
def _as_temp_path(host, path, temppath):
    """Build a UNC path to a fresh, uniquely-named temp file on the same
    share that *path* lives on (under the ``temppath`` prefix)."""
    share = path.split("/")[1]
    staging = f"/{share}{temppath}/{uuid.uuid4()}"
    return _as_unc_path(host, staging)
286
+
287
+
288
+ def _share_has_path(path):
289
+ parts = path.count("/")
290
+ if path.endswith("/"):
291
+ return parts > 2
292
+ return parts > 1
293
+
294
+
295
class SMBFileOpener:
    """writes to remote temporary file, move on commit"""

    def __init__(self, path, temp, mode, port=445, block_size=-1, **kwargs):
        # final destination (UNC path)
        self.path = path
        # temporary UNC path that receives all writes until commit()
        self.temp = temp
        self.mode = mode
        self.block_size = block_size
        self.kwargs = kwargs
        # live smbclient file handle; created lazily by _open()
        self.smbfile = None
        self._incontext = False
        self.port = port
        self._open()

    def _open(self):
        # (re)open the temp file if we have no handle or it was closed
        if self.smbfile is None or self.smbfile.closed:
            self.smbfile = smbclient.open_file(
                self.temp,
                self.mode,
                port=self.port,
                buffering=self.block_size,
                **self.kwargs,
            )

    def commit(self):
        """Move temp file to definitive on success."""
        # TODO: use transaction support in SMB protocol
        smbclient.replace(self.temp, self.path, port=self.port)

    def discard(self):
        """Remove the temp file on failure."""
        smbclient.remove(self.temp, port=self.port)

    def __fspath__(self):
        # report the *destination* path, not the staging file
        return self.path

    def __iter__(self):
        return self.smbfile.__iter__()

    def __getattr__(self, item):
        # delegate everything else (read/write/seek/...) to the handle
        return getattr(self.smbfile, item)

    def __enter__(self):
        self._incontext = True
        return self.smbfile.__enter__()

    def __exit__(self, exc_type, exc_value, traceback):
        self._incontext = False
        self.smbfile.__exit__(exc_type, exc_value, traceback)
parrot/lib/python3.10/site-packages/fsspec/mapping.py ADDED
@@ -0,0 +1,251 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import array
2
+ import logging
3
+ import posixpath
4
+ import warnings
5
+ from collections.abc import MutableMapping
6
+ from functools import cached_property
7
+
8
+ from fsspec.core import url_to_fs
9
+
10
+ logger = logging.getLogger("fsspec.mapping")
11
+
12
+
13
class FSMap(MutableMapping):
    """Wrap a FileSystem instance as a mutable wrapping.

    The keys of the mapping become files under the given root, and the
    values (which must be bytes) the contents of those files.

    Parameters
    ----------
    root: string
        prefix for all the files
    fs: FileSystem instance
    check: bool (=True)
        performs a touch at the location, to check for write access.

    Examples
    --------
    >>> fs = FileSystem(**parameters)  # doctest: +SKIP
    >>> d = FSMap('my-data/path/', fs)  # doctest: +SKIP
    or, more likely
    >>> d = fs.get_mapper('my-data/path/')

    >>> d['loc1'] = b'Hello World'  # doctest: +SKIP
    >>> list(d.keys())  # doctest: +SKIP
    ['loc1']
    >>> d['loc1']  # doctest: +SKIP
    b'Hello World'
    """

    def __init__(self, root, fs, check=False, create=False, missing_exceptions=None):
        self.fs = fs
        self.root = fs._strip_protocol(root)
        # root with a guaranteed trailing separator, for building key paths
        self._root_key_to_str = fs._strip_protocol(posixpath.join(root, "x"))[:-1]
        if missing_exceptions is None:
            # exception types that translate to KeyError for mapping users
            missing_exceptions = (
                FileNotFoundError,
                IsADirectoryError,
                NotADirectoryError,
            )
        self.missing_exceptions = missing_exceptions
        self.check = check
        self.create = create
        if create:
            if not self.fs.exists(root):
                self.fs.mkdir(root)
        if check:
            if not self.fs.exists(root):
                raise ValueError(
                    f"Path {root} does not exist. Create "
                    f" with the ``create=True`` keyword"
                )
            # verify write access by creating and deleting a probe file
            self.fs.touch(root + "/a")
            self.fs.rm(root + "/a")

    @cached_property
    def dirfs(self):
        """dirfs instance that can be used with the same keys as the mapper"""
        from .implementations.dirfs import DirFileSystem

        return DirFileSystem(path=self._root_key_to_str, fs=self.fs)

    def clear(self):
        """Remove all keys below root - empties out mapping"""
        logger.info("Clear mapping at %s", self.root)
        try:
            self.fs.rm(self.root, True)
            self.fs.mkdir(self.root)
        except:  # noqa: E722
            # best-effort: failures to clear are deliberately ignored
            pass

    def getitems(self, keys, on_error="raise"):
        """Fetch multiple items from the store

        If the backend is async-able, this might proceed concurrently

        Parameters
        ----------
        keys: list(str)
            They keys to be fetched
        on_error : "raise", "omit", "return"
            If raise, an underlying exception will be raised (converted to KeyError
            if the type is in self.missing_exceptions); if omit, keys with exception
            will simply not be included in the output; if "return", all keys are
            included in the output, but the value will be bytes or an exception
            instance.

        Returns
        -------
        dict(key, bytes|exception)
        """
        keys2 = [self._key_to_str(k) for k in keys]
        # for "omit" we still need the exceptions back, to filter them below
        oe = on_error if on_error == "raise" else "return"
        try:
            out = self.fs.cat(keys2, on_error=oe)
            if isinstance(out, bytes):
                # single-key fast path: cat returned bare bytes
                out = {keys2[0]: out}
        except self.missing_exceptions as e:
            raise KeyError from e
        out = {
            k: (KeyError() if isinstance(v, self.missing_exceptions) else v)
            for k, v in out.items()
        }
        return {
            key: out[k2]
            for key, k2 in zip(keys, keys2)
            if on_error == "return" or not isinstance(out[k2], BaseException)
        }

    def setitems(self, values_dict):
        """Set the values of multiple items in the store

        Parameters
        ----------
        values_dict: dict(str, bytes)
        """
        values = {self._key_to_str(k): maybe_convert(v) for k, v in values_dict.items()}
        self.fs.pipe(values)

    def delitems(self, keys):
        """Remove multiple keys from the store"""
        self.fs.rm([self._key_to_str(k) for k in keys])

    def _key_to_str(self, key):
        """Generate full path for the key"""
        if not isinstance(key, str):
            # raise TypeError("key must be of type `str`, got `{type(key).__name__}`"
            warnings.warn(
                "from fsspec 2023.5 onward FSMap non-str keys will raise TypeError",
                DeprecationWarning,
            )
            if isinstance(key, list):
                key = tuple(key)
            key = str(key)
        return f"{self._root_key_to_str}{key}".rstrip("/")

    def _str_to_key(self, s):
        """Strip path of to leave key name"""
        return s[len(self.root) :].lstrip("/")

    def __getitem__(self, key, default=None):
        """Retrieve data"""
        k = self._key_to_str(key)
        try:
            result = self.fs.cat(k)
        except self.missing_exceptions:
            if default is not None:
                return default
            raise KeyError(key)
        return result

    def pop(self, key, default=None):
        """Pop data"""
        result = self.__getitem__(key, default)
        try:
            del self[key]
        except KeyError:
            # already gone (or default was returned); nothing to delete
            pass
        return result

    def __setitem__(self, key, value):
        """Store value in key"""
        key = self._key_to_str(key)
        self.fs.mkdirs(self.fs._parent(key), exist_ok=True)
        self.fs.pipe_file(key, maybe_convert(value))

    def __iter__(self):
        return (self._str_to_key(x) for x in self.fs.find(self.root))

    def __len__(self):
        return len(self.fs.find(self.root))

    def __delitem__(self, key):
        """Remove key"""
        try:
            self.fs.rm(self._key_to_str(key))
        except:  # noqa: E722
            # any backend failure is reported as a missing key
            raise KeyError

    def __contains__(self, key):
        """Does key exist in mapping?"""
        path = self._key_to_str(key)
        return self.fs.isfile(path)

    def __reduce__(self):
        # pickle support: never re-run create/check on unpickling
        return FSMap, (self.root, self.fs, False, False, self.missing_exceptions)
197
+
198
+
199
def maybe_convert(value):
    """Coerce array-like values into plain ``bytes``; pass others through."""
    arraylike = isinstance(value, array.array) or hasattr(value, "__array__")
    if arraylike:
        if hasattr(value, "dtype") and value.dtype.kind in "Mm":
            # The buffer interface doesn't support datetime64/timedelta64
            # numpy arrays, so reinterpret them as raw int64
            value = value.view("int64")
        value = bytes(memoryview(value))
    return value
208
+
209
+
210
def get_mapper(
    url="",
    check=False,
    create=False,
    missing_exceptions=None,
    alternate_root=None,
    **kwargs,
):
    """Create key-value interface for given URL and options

    The URL will be of the form "protocol://location" and point to the root
    of the mapper required. All keys will be file-names below this location,
    and their values the contents of each key.

    Also accepts compound URLs like zip::s3://bucket/file.zip , see ``fsspec.open``.

    Parameters
    ----------
    url: str
        Root URL of mapping
    check: bool
        Whether to attempt to read from the location before instantiation, to
        check that the mapping does exist
    create: bool
        Whether to make the directory corresponding to the root before
        instantiating
    missing_exceptions: None or tuple
        If given, these exception types will be regarded as missing keys and
        return KeyError when trying to read data. By default, you get
        (FileNotFoundError, IsADirectoryError, NotADirectoryError)
    alternate_root: None or str
        In cases of complex URLs, the parser may fail to pick the correct part
        for the mapper root, so this arg can override

    Returns
    -------
    ``FSMap`` instance, the dict-like key-value store.
    """
    # Removing protocol here - could defer to each open() on the backend
    fs, urlpath = url_to_fs(url, **kwargs)
    if alternate_root is not None:
        root = alternate_root
    else:
        root = urlpath
    return FSMap(root, fs, check, create, missing_exceptions=missing_exceptions)
parrot/lib/python3.10/site-packages/fsspec/parquet.py ADDED
@@ -0,0 +1,541 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import io
2
+ import json
3
+ import warnings
4
+
5
+ from .core import url_to_fs
6
+ from .utils import merge_offset_ranges
7
+
8
+ # Parquet-Specific Utilities for fsspec
9
+ #
10
+ # Most of the functions defined in this module are NOT
11
+ # intended for public consumption. The only exception
12
+ # to this is `open_parquet_file`, which should be used
13
+ # place of `fs.open()` to open parquet-formatted files
14
+ # on remote file systems.
15
+
16
+
17
def open_parquet_file(
    path,
    mode="rb",
    fs=None,
    metadata=None,
    columns=None,
    row_groups=None,
    storage_options=None,
    strict=False,
    engine="auto",
    max_gap=64_000,
    max_block=256_000_000,
    footer_sample_size=1_000_000,
    **kwargs,
):
    """Open a single Parquet file with "parts"-based caching.

    The selected parquet ``engine`` parses the footer metadata to work
    out which byte ranges of the file are needed for ``columns`` and/or
    ``row_groups``. Those ranges are transferred up front, and the file
    is then opened with the `KnownPartsOfAFile` ("parts") caching
    strategy so subsequent reads are served from memory. Intended for
    remote file systems; unlikely to help on local storage.

    Parameters
    ----------
    path: str
        Target file path.
    mode: str, optional
        Mode option to be passed through to `fs.open`. Default is "rb".
    metadata: Any, optional
        Engine-specific parquet metadata object. Only the "fastparquet"
        engine currently accepts an explicit `ParquetFile` object.
        Supplying it avoids transferring the remote footer metadata.
    fs: AbstractFileSystem, optional
        Filesystem object used to open the file; inferred from `path`
        and `storage_options` when not given.
    engine : str, default "auto"
        Parquet engine used for metadata parsing: "fastparquet",
        "pyarrow", or "auto" ("fastparquet" takes precedence when both
        are installed).
    columns: list, optional
        List of all column names that may be read from the file.
    row_groups : list, optional
        List of all row-groups that may be read from the file — either
        indices (integers) or, for the "fastparquet" engine, `RowGroup`
        metadata objects.
    storage_options : dict, optional
        Used to infer a filesystem when `fs` is not specified.
    strict : bool, optional
        Passed to the `KnownPartsOfAFile` cache. When False (default),
        a read that ends outside a known part is zero padded;
        ``strict=True`` is mainly useful for debugging.
    max_gap : int, optional
        Merge neighboring byte ranges whose inter-range gap is
        <= `max_gap`. Default is 64KB.
    max_block : int, optional
        Merge neighboring byte ranges only while the aggregated size
        stays <= `max_block`. Default is 256MB.
    footer_sample_size : int, optional
        Number of bytes sampled from the end of the path to look for
        the footer metadata (default 1MB). A sample that misses the
        footer forces a second read request and hurts performance.
    **kwargs :
        Optional key-word arguments to pass to `fs.open`
    """

    # Infer a filesystem from the URL when one was not supplied
    if fs is None:
        fs = url_to_fs(path, **(storage_options or {}))[0]

    # An empty column selection is not supported by the byte-range
    # machinery — fall back to a plain `open` of the target path
    if columns is not None and not len(columns):
        return fs.open(path, mode=mode)

    # Resolve the engine name to an engine instance
    engine = _set_engine(engine)

    # Determine the byte ranges required to read the requested
    # `columns` and/or `row_groups`, and fetch them up front
    known_parts = _get_parquet_byte_ranges(
        [path],
        fs,
        metadata=metadata,
        columns=columns,
        row_groups=row_groups,
        engine=engine,
        max_gap=max_gap,
        max_block=max_block,
        footer_sample_size=footer_sample_size,
    )

    # The mapping is keyed by file name (normally just `path`)
    target = next(iter(known_parts)) if known_parts else path

    # Open with the "parts" cache, seeded with the fetched ranges
    cache_options = kwargs.pop("cache_options", {}).copy()
    cache_options["data"] = known_parts.get(target, {})
    cache_options["strict"] = strict
    return fs.open(
        target,
        mode=mode,
        cache_type="parts",
        cache_options=cache_options,
        **kwargs,
    )
139
+
140
+
141
def _get_parquet_byte_ranges(
    paths,
    fs,
    metadata=None,
    columns=None,
    row_groups=None,
    max_gap=64_000,
    max_block=256_000_000,
    footer_sample_size=1_000_000,
    engine="auto",
):
    """Get a dictionary of the known byte ranges needed
    to read a specific column/row-group selection from a
    Parquet dataset. Each value in the output dictionary
    is intended for use as the `data` argument for the
    `KnownPartsOfAFile` caching strategy of a single path.

    Parameters
    ----------
    paths : list of str
        Target parquet-file paths.
    fs : AbstractFileSystem
        Filesystem used to sample footers and transfer byte ranges.
    metadata : Any, optional
        Engine-specific parquet metadata object. When given, footer
        sampling is skipped entirely and the work is delegated to
        `_get_parquet_byte_ranges_from_metadata`.
    columns : list, optional
        Column names to select; ``None`` means all columns.
    row_groups : list, optional
        Row-group selection; ``None`` means all row-groups.
    max_gap, max_block : int, optional
        Byte-range merging parameters (see `merge_offset_ranges`).
    footer_sample_size : int, optional
        Number of trailing bytes sampled from each file to try to
        capture the footer metadata in a single request.
    engine : str or engine object, optional
        Parquet engine (or its name) used to parse footer metadata.

    Returns
    -------
    dict
        ``{path: {(start, stop): bytes, ...}, ...}``
    """

    # Set engine if necessary
    if isinstance(engine, str):
        engine = _set_engine(engine)

    # Pass to specialized function if metadata is defined
    if metadata is not None:
        # Use the provided parquet metadata object
        # to avoid transferring/parsing footer metadata
        return _get_parquet_byte_ranges_from_metadata(
            metadata,
            fs,
            engine,
            columns=columns,
            row_groups=row_groups,
            max_gap=max_gap,
            max_block=max_block,
        )

    # Get file sizes asynchronously
    file_sizes = fs.sizes(paths)

    # Populate global paths, starts, & ends
    result = {}
    data_paths = []
    data_starts = []
    data_ends = []
    add_header_magic = True
    if columns is None and row_groups is None:
        # We are NOT selecting specific columns or row-groups.
        #
        # We can avoid sampling the footers, and just transfer
        # all file data with cat_ranges, chunked at `max_block`
        for i, path in enumerate(paths):
            result[path] = {}
            for b in range(0, file_sizes[i], max_block):
                data_paths.append(path)
                data_starts.append(b)
                data_ends.append(min(b + max_block, file_sizes[i]))
        add_header_magic = False  # "Magic" should already be included
    else:
        # We ARE selecting specific columns or row-groups.
        #
        # Gather file footers.
        # We just take the last `footer_sample_size` bytes of each
        # file (or the entire file if it is smaller than that)
        footer_starts = []
        footer_ends = []
        for i, path in enumerate(paths):
            footer_ends.append(file_sizes[i])
            sample_size = max(0, file_sizes[i] - footer_sample_size)
            footer_starts.append(sample_size)
        footer_samples = fs.cat_ranges(paths, footer_starts, footer_ends)

        # Check our footer samples and re-sample if necessary.
        # The last 8 bytes of a parquet file are the 4-byte
        # little-endian footer length followed by b"PAR1", so we can
        # tell whether the sample actually contains the whole footer.
        missing_footer_starts = footer_starts.copy()
        large_footer = 0
        for i, path in enumerate(paths):
            footer_size = int.from_bytes(footer_samples[i][-8:-4], "little")
            real_footer_start = file_sizes[i] - (footer_size + 8)
            if real_footer_start < footer_starts[i]:
                # Footer is larger than the sample; remember how far
                # back we actually need to read
                missing_footer_starts[i] = real_footer_start
                large_footer = max(large_footer, (footer_size + 8))
        if large_footer:
            warnings.warn(
                f"Not enough data was used to sample the parquet footer. "
                f"Try setting footer_sample_size >= {large_footer}."
            )
            # Fetch the missing prefix of each under-sampled footer and
            # prepend it to the existing sample
            for i, block in enumerate(
                fs.cat_ranges(
                    paths,
                    missing_footer_starts,
                    footer_starts,
                )
            ):
                footer_samples[i] = block + footer_samples[i]
                footer_starts[i] = missing_footer_starts[i]

        # Calculate required byte ranges for each path
        for i, path in enumerate(paths):
            # Deal with small-file case.
            # Just include all remaining bytes of the file
            # in a single range.
            if file_sizes[i] < max_block:
                if footer_starts[i] > 0:
                    # Only need to transfer the data if the
                    # footer sample isn't already the whole file
                    data_paths.append(path)
                    data_starts.append(0)
                    data_ends.append(footer_starts[i])
                continue

            # Use "engine" to collect data byte ranges
            path_data_starts, path_data_ends = engine._parquet_byte_ranges(
                columns,
                row_groups=row_groups,
                footer=footer_samples[i],
                footer_start=footer_starts[i],
            )

            data_paths += [path] * len(path_data_starts)
            data_starts += path_data_starts
            data_ends += path_data_ends

        # Merge adjacent offset ranges
        data_paths, data_starts, data_ends = merge_offset_ranges(
            data_paths,
            data_starts,
            data_ends,
            max_gap=max_gap,
            max_block=max_block,
            sort=False,  # Should already be sorted
        )

        # Start by populating `result` with footer samples
        for i, path in enumerate(paths):
            result[path] = {(footer_starts[i], footer_ends[i]): footer_samples[i]}

    # Transfer the data byte-ranges into local memory
    _transfer_ranges(fs, result, data_paths, data_starts, data_ends)

    # Add b"PAR1" to header if necessary
    if add_header_magic:
        _add_header_magic(result)

    return result
284
+
285
+
286
def _get_parquet_byte_ranges_from_metadata(
    metadata,
    fs,
    engine,
    columns=None,
    row_groups=None,
    max_gap=64_000,
    max_block=256_000_000,
):
    """Simplified version of `_get_parquet_byte_ranges` for
    the case that an engine-specific `metadata` object is
    provided, and the remote footer metadata does not need to
    be transferred before calculating the required byte ranges.
    """

    # Ask the engine for the raw byte ranges directly from `metadata`.
    # Since the metadata may span multiple files, the engine also
    # returns the corresponding file paths here.
    paths, starts, ends = engine._parquet_byte_ranges(
        columns,
        row_groups=row_groups,
        metadata=metadata,
    )

    # Coalesce neighboring ranges
    paths, starts, ends = merge_offset_ranges(
        paths,
        starts,
        ends,
        max_gap=max_gap,
        max_block=max_block,
        sort=False,  # Should be sorted
    )

    # Fetch the ranges into local memory, one mapping per file
    result = {fn: {} for fn in set(paths)}
    _transfer_ranges(fs, result, paths, starts, ends)

    # Add b"PAR1" to header
    _add_header_magic(result)

    return result
326
+
327
+
328
+ def _transfer_ranges(fs, blocks, paths, starts, ends):
329
+ # Use cat_ranges to gather the data byte_ranges
330
+ ranges = (paths, starts, ends)
331
+ for path, start, stop, data in zip(*ranges, fs.cat_ranges(*ranges)):
332
+ blocks[path][(start, stop)] = data
333
+
334
+
335
+ def _add_header_magic(data):
336
+ # Add b"PAR1" to file headers
337
+ for path in list(data.keys()):
338
+ add_magic = True
339
+ for k in data[path].keys():
340
+ if k[0] == 0 and k[1] >= 4:
341
+ add_magic = False
342
+ break
343
+ if add_magic:
344
+ data[path][(0, 4)] = b"PAR1"
345
+
346
+
347
+ def _set_engine(engine_str):
348
+ # Define a list of parquet engines to try
349
+ if engine_str == "auto":
350
+ try_engines = ("fastparquet", "pyarrow")
351
+ elif not isinstance(engine_str, str):
352
+ raise ValueError(
353
+ "Failed to set parquet engine! "
354
+ "Please pass 'fastparquet', 'pyarrow', or 'auto'"
355
+ )
356
+ elif engine_str not in ("fastparquet", "pyarrow"):
357
+ raise ValueError(f"{engine_str} engine not supported by `fsspec.parquet`")
358
+ else:
359
+ try_engines = [engine_str]
360
+
361
+ # Try importing the engines in `try_engines`,
362
+ # and choose the first one that succeeds
363
+ for engine in try_engines:
364
+ try:
365
+ if engine == "fastparquet":
366
+ return FastparquetEngine()
367
+ elif engine == "pyarrow":
368
+ return PyarrowEngine()
369
+ except ImportError:
370
+ pass
371
+
372
+ # Raise an error if a supported parquet engine
373
+ # was not found
374
+ raise ImportError(
375
+ f"The following parquet engines are not installed "
376
+ f"in your python environment: {try_engines}."
377
+ f"Please install 'fastparquert' or 'pyarrow' to "
378
+ f"utilize the `fsspec.parquet` module."
379
+ )
380
+
381
+
382
class FastparquetEngine:
    # The purpose of the FastparquetEngine class is
    # to check if fastparquet can be imported (on initialization)
    # and to define a `_parquet_byte_ranges` method. In the
    # future, this class may also be used to define other
    # methods/logic that are specific to fastparquet.

    def __init__(self):
        # Raises ImportError when fastparquet is missing; `_set_engine`
        # relies on this to fall through to the next candidate engine
        import fastparquet as fp

        self.fp = fp

    def _row_group_filename(self, row_group, pf):
        # Map a row-group to the parquet file that contains it
        # (multi-file datasets keep row-groups in separate files)
        return pf.row_group_filename(row_group)

    def _parquet_byte_ranges(
        self,
        columns,
        row_groups=None,
        metadata=None,
        footer=None,
        footer_start=None,
    ):
        """Collect the byte ranges needed for the selected
        columns/row-groups.

        Returns ``(data_starts, data_ends)`` when parsing a raw
        ``footer``, or ``(data_paths, data_starts, data_ends)`` when an
        explicit ``metadata`` (``fastparquet.ParquetFile``) object is
        supplied, since that metadata may map to multiple files.
        """
        # Initialize offset ranges and define ParquetFile metadata
        pf = metadata
        data_paths, data_starts, data_ends = [], [], []
        if pf is None:
            pf = self.fp.ParquetFile(io.BytesIO(footer))

        # Convert columns to a set and add any index columns
        # specified in the pandas metadata (just in case)
        column_set = None if columns is None else set(columns)
        if column_set is not None and hasattr(pf, "pandas_metadata"):
            md_index = [
                ind
                for ind in pf.pandas_metadata.get("index_columns", [])
                # Ignore RangeIndex information
                if not isinstance(ind, dict)
            ]
            column_set |= set(md_index)

        # Check if row_groups is a list of integers
        # or a list of row-group metadata
        if row_groups and not isinstance(row_groups[0], int):
            # Input row_groups contains row-group metadata;
            # iterate them directly (no index filter needed)
            row_group_indices = None
        else:
            # Input row_groups contains row-group indices
            row_group_indices = row_groups
            row_groups = pf.row_groups

        # Loop through column chunks to add required byte ranges
        for r, row_group in enumerate(row_groups):
            # Skip this row-group if we are targeting
            # specific row-groups
            if row_group_indices is None or r in row_group_indices:
                # Find the target parquet-file path for `row_group`
                fn = self._row_group_filename(row_group, pf)

                for column in row_group.columns:
                    name = column.meta_data.path_in_schema[0]
                    # Skip this column if we are targeting a
                    # specific columns
                    if column_set is None or name in column_set:
                        # The dictionary page (if present) precedes the
                        # data pages, so it marks the chunk start
                        file_offset0 = column.meta_data.dictionary_page_offset
                        if file_offset0 is None:
                            file_offset0 = column.meta_data.data_page_offset
                        num_bytes = column.meta_data.total_compressed_size
                        # Clip ranges at `footer_start`: the footer
                        # sample bytes are already held in memory
                        if footer_start is None or file_offset0 < footer_start:
                            data_paths.append(fn)
                            data_starts.append(file_offset0)
                            data_ends.append(
                                min(
                                    file_offset0 + num_bytes,
                                    footer_start or (file_offset0 + num_bytes),
                                )
                            )

        if metadata:
            # The metadata in this call may map to multiple
            # file paths. Need to include `data_paths`
            return data_paths, data_starts, data_ends
        return data_starts, data_ends
465
+
466
+
467
class PyarrowEngine:
    # The purpose of the PyarrowEngine class is
    # to check if pyarrow can be imported (on initialization)
    # and to define a `_parquet_byte_ranges` method. In the
    # future, this class may also be used to define other
    # methods/logic that are specific to pyarrow.

    def __init__(self):
        # Raises ImportError when pyarrow is missing; `_set_engine`
        # relies on this to fall through to the next candidate engine
        import pyarrow.parquet as pq

        self.pq = pq

    def _row_group_filename(self, row_group, metadata):
        # Multi-file metadata mapping is not supported by this engine
        # (see the `metadata` guard in `_parquet_byte_ranges`)
        raise NotImplementedError

    def _parquet_byte_ranges(
        self,
        columns,
        row_groups=None,
        metadata=None,
        footer=None,
        footer_start=None,
    ):
        """Collect the byte ranges needed for the selected
        columns/row-groups from a raw ``footer`` sample.

        Returns ``(data_starts, data_ends)``. Raises ValueError if an
        explicit ``metadata`` object is passed (unlike the fastparquet
        engine, pyarrow metadata input is not supported here).
        """
        if metadata is not None:
            raise ValueError("metadata input not supported for PyarrowEngine")

        data_starts, data_ends = [], []
        md = self.pq.ParquetFile(io.BytesIO(footer)).metadata

        # Convert columns to a set and add any index columns
        # specified in the pandas metadata (just in case)
        column_set = None if columns is None else set(columns)
        if column_set is not None:
            schema = md.schema.to_arrow_schema()
            has_pandas_metadata = (
                schema.metadata is not None and b"pandas" in schema.metadata
            )
            if has_pandas_metadata:
                md_index = [
                    ind
                    for ind in json.loads(
                        schema.metadata[b"pandas"].decode("utf8")
                    ).get("index_columns", [])
                    # Ignore RangeIndex information
                    if not isinstance(ind, dict)
                ]
                column_set |= set(md_index)

        # Loop through column chunks to add required byte ranges
        for r in range(md.num_row_groups):
            # Skip this row-group if we are targeting
            # specific row-groups
            if row_groups is None or r in row_groups:
                row_group = md.row_group(r)
                for c in range(row_group.num_columns):
                    column = row_group.column(c)
                    name = column.path_in_schema
                    # Skip this column if we are targeting a
                    # specific columns.
                    # `split_name` lets a selected top-level column
                    # match its nested ("a.b.c") sub-fields.
                    split_name = name.split(".")[0]
                    if (
                        column_set is None
                        or name in column_set
                        or split_name in column_set
                    ):
                        # Dictionary page (if any) precedes data pages
                        file_offset0 = column.dictionary_page_offset
                        if file_offset0 is None:
                            file_offset0 = column.data_page_offset
                        num_bytes = column.total_compressed_size
                        # Clip at `footer_start` — the footer sample
                        # bytes are already held in memory
                        if file_offset0 < footer_start:
                            data_starts.append(file_offset0)
                            data_ends.append(
                                min(file_offset0 + num_bytes, footer_start)
                            )
        return data_starts, data_ends
parrot/lib/python3.10/site-packages/fsspec/registry.py ADDED
@@ -0,0 +1,311 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import importlib
4
+ import types
5
+ import warnings
6
+
7
+ __all__ = ["registry", "get_filesystem_class", "default"]
8
+
9
+ # internal, mutable
10
+ _registry: dict[str, type] = {}
11
+
12
+ # external, immutable
13
+ registry = types.MappingProxyType(_registry)
14
+ default = "file"
15
+
16
+
17
def register_implementation(name, cls, clobber=False, errtxt=None):
    """Add implementation class to the registry

    Parameters
    ----------
    name: str
        Protocol name to associate with the class
    cls: class or str
        Either an fsspec-compliant filesystem class (normally a
        subclass of ``fsspec.AbstractFileSystem``), which is placed in
        the registry directly, or a fully-qualified path string like
        "package.module.class", which is recorded in
        ``known_implementations`` so the import is deferred until the
        filesystem is actually used.
    clobber: bool (optional)
        Whether to overwrite a protocol with the same name; if False,
        a conflicting registration raises ValueError instead.
    errtxt: str (optional)
        If given, then a failure to import the given class will result
        in this text being given.
    """
    if not isinstance(cls, str):
        # Direct registration of a class object
        if name in registry and clobber is False:
            # Re-registering the identical class is a harmless no-op
            if _registry[name] is not cls:
                raise ValueError(
                    f"Name ({name}) already in the registry and clobber is False"
                )
        else:
            _registry[name] = cls
        return

    # Deferred registration: record only the import path
    if name in known_implementations and clobber is False:
        # Re-registering the identical path is a harmless no-op
        if cls != known_implementations[name]["class"]:
            raise ValueError(
                f"Name ({name}) already in the known_implementations and clobber "
                f"is False"
            )
    else:
        known_implementations[name] = {
            "class": cls,
            "err": errtxt or f"{cls} import failed for protocol {name}",
        }
58
+
59
+
60
# protocols mapped to the class which implements them. This dict can be
# updated with register_implementation.
# Each entry maps a protocol name to a dict with:
#   "class": fully-qualified import path of the implementation class
#   "err":   (optional) message shown when importing that class fails
known_implementations = {
    "abfs": {
        "class": "adlfs.AzureBlobFileSystem",
        "err": "Install adlfs to access Azure Datalake Gen2 and Azure Blob Storage",
    },
    "adl": {
        "class": "adlfs.AzureDatalakeFileSystem",
        "err": "Install adlfs to access Azure Datalake Gen1",
    },
    "arrow_hdfs": {
        "class": "fsspec.implementations.arrow.HadoopFileSystem",
        "err": "pyarrow and local java libraries required for HDFS",
    },
    "asynclocal": {
        "class": "morefs.asyn_local.AsyncLocalFileSystem",
        "err": "Install 'morefs[asynclocalfs]' to use AsyncLocalFileSystem",
    },
    "az": {
        "class": "adlfs.AzureBlobFileSystem",
        "err": "Install adlfs to access Azure Datalake Gen2 and Azure Blob Storage",
    },
    "blockcache": {"class": "fsspec.implementations.cached.CachingFileSystem"},
    "box": {
        "class": "boxfs.BoxFileSystem",
        "err": "Please install boxfs to access BoxFileSystem",
    },
    "cached": {"class": "fsspec.implementations.cached.CachingFileSystem"},
    "dask": {
        "class": "fsspec.implementations.dask.DaskWorkerFileSystem",
        "err": "Install dask distributed to access worker file system",
    },
    "data": {"class": "fsspec.implementations.data.DataFileSystem"},
    "dbfs": {
        "class": "fsspec.implementations.dbfs.DatabricksFileSystem",
        "err": "Install the requests package to use the DatabricksFileSystem",
    },
    "dir": {"class": "fsspec.implementations.dirfs.DirFileSystem"},
    "dropbox": {
        "class": "dropboxdrivefs.DropboxDriveFileSystem",
        "err": (
            'DropboxFileSystem requires "dropboxdrivefs","requests" and "'
            '"dropbox" to be installed'
        ),
    },
    "dvc": {
        "class": "dvc.api.DVCFileSystem",
        "err": "Install dvc to access DVCFileSystem",
    },
    "file": {"class": "fsspec.implementations.local.LocalFileSystem"},
    "filecache": {"class": "fsspec.implementations.cached.WholeFileCacheFileSystem"},
    "ftp": {"class": "fsspec.implementations.ftp.FTPFileSystem"},
    "gcs": {
        "class": "gcsfs.GCSFileSystem",
        "err": "Please install gcsfs to access Google Storage",
    },
    "gdrive": {
        "class": "gdrivefs.GoogleDriveFileSystem",
        "err": "Please install gdrivefs for access to Google Drive",
    },
    "generic": {"class": "fsspec.generic.GenericFileSystem"},
    "git": {
        "class": "fsspec.implementations.git.GitFileSystem",
        "err": "Install pygit2 to browse local git repos",
    },
    "github": {
        "class": "fsspec.implementations.github.GithubFileSystem",
        "err": "Install the requests package to use the github FS",
    },
    "gs": {
        "class": "gcsfs.GCSFileSystem",
        "err": "Please install gcsfs to access Google Storage",
    },
    "hdfs": {
        "class": "fsspec.implementations.arrow.HadoopFileSystem",
        "err": "pyarrow and local java libraries required for HDFS",
    },
    "hf": {
        "class": "huggingface_hub.HfFileSystem",
        "err": "Install huggingface_hub to access HfFileSystem",
    },
    "http": {
        "class": "fsspec.implementations.http.HTTPFileSystem",
        "err": 'HTTPFileSystem requires "requests" and "aiohttp" to be installed',
    },
    "https": {
        "class": "fsspec.implementations.http.HTTPFileSystem",
        "err": 'HTTPFileSystem requires "requests" and "aiohttp" to be installed',
    },
    "jlab": {
        "class": "fsspec.implementations.jupyter.JupyterFileSystem",
        "err": "Jupyter FS requires requests to be installed",
    },
    "jupyter": {
        "class": "fsspec.implementations.jupyter.JupyterFileSystem",
        "err": "Jupyter FS requires requests to be installed",
    },
    "lakefs": {
        "class": "lakefs_spec.LakeFSFileSystem",
        "err": "Please install lakefs-spec to access LakeFSFileSystem",
    },
    "libarchive": {
        "class": "fsspec.implementations.libarchive.LibArchiveFileSystem",
        "err": "LibArchive requires to be installed",
    },
    "local": {"class": "fsspec.implementations.local.LocalFileSystem"},
    "memory": {"class": "fsspec.implementations.memory.MemoryFileSystem"},
    "oci": {
        "class": "ocifs.OCIFileSystem",
        "err": "Install ocifs to access OCI Object Storage",
    },
    "ocilake": {
        "class": "ocifs.OCIFileSystem",
        "err": "Install ocifs to access OCI Data Lake",
    },
    "oss": {
        "class": "ossfs.OSSFileSystem",
        "err": "Install ossfs to access Alibaba Object Storage System",
    },
    "reference": {"class": "fsspec.implementations.reference.ReferenceFileSystem"},
    "root": {
        "class": "fsspec_xrootd.XRootDFileSystem",
        "err": (
            "Install fsspec-xrootd to access xrootd storage system. "
            "Note: 'root' is the protocol name for xrootd storage systems, "
            "not referring to root directories"
        ),
    },
    "s3": {"class": "s3fs.S3FileSystem", "err": "Install s3fs to access S3"},
    "s3a": {"class": "s3fs.S3FileSystem", "err": "Install s3fs to access S3"},
    "sftp": {
        "class": "fsspec.implementations.sftp.SFTPFileSystem",
        "err": 'SFTPFileSystem requires "paramiko" to be installed',
    },
    "simplecache": {"class": "fsspec.implementations.cached.SimpleCacheFileSystem"},
    "smb": {
        "class": "fsspec.implementations.smb.SMBFileSystem",
        "err": 'SMB requires "smbprotocol" or "smbprotocol[kerberos]" installed',
    },
    "ssh": {
        "class": "fsspec.implementations.sftp.SFTPFileSystem",
        "err": 'SFTPFileSystem requires "paramiko" to be installed',
    },
    "tar": {"class": "fsspec.implementations.tar.TarFileSystem"},
    "wandb": {"class": "wandbfs.WandbFS", "err": "Install wandbfs to access wandb"},
    "webdav": {
        "class": "webdav4.fsspec.WebdavFileSystem",
        "err": "Install webdav4 to access WebDAV",
    },
    "webhdfs": {
        "class": "fsspec.implementations.webhdfs.WebHDFS",
        "err": 'webHDFS access requires "requests" to be installed',
    },
    "zip": {"class": "fsspec.implementations.zip.ZipFileSystem"},
}

# Keep the table alphabetically sorted (enforced at import time)
assert list(known_implementations) == sorted(
    known_implementations
), "Not in alphabetical order"
220
+
221
+
222
def get_filesystem_class(protocol):
    """Fetch named protocol implementation from the registry

    The dict ``known_implementations`` maps protocol names to the
    locations of classes implementing the corresponding file-system.
    When used for the first time, appropriate imports will happen and
    the class will be placed in the registry. All subsequent calls will
    fetch directly from the registry.

    Some protocol implementations require additional dependencies, and
    so the import may fail. In this case, the string in the "err" field
    of the ``known_implementations`` will be given as the error message.
    """
    protocol = protocol or default

    if protocol not in registry:
        # First use: import the implementation lazily and register it
        if protocol not in known_implementations:
            raise ValueError(f"Protocol not known: {protocol}")
        spec = known_implementations[protocol]
        try:
            register_implementation(protocol, _import_class(spec["class"]))
        except ImportError as e:
            # Surface the human-readable install hint for this protocol
            raise ImportError(spec["err"]) from e

    cls = registry[protocol]
    if getattr(cls, "protocol", None) in ("abstract", None):
        # Stamp the concrete protocol name onto generic/abstract classes
        cls.protocol = protocol

    return cls
250
+
251
+
252
# Warning text emitted by `_import_class` below when a very old s3fs
# installation is detected at import time.
s3_msg = """Your installed version of s3fs is very old and known to cause
severe performance issues, see also https://github.com/dask/dask/issues/10276

To fix, you should specify a lower version bound on s3fs, or
update the current installation.
"""
258
+
259
+
260
+ def _import_class(fqp: str):
261
+ """Take a fully-qualified path and return the imported class or identifier.
262
+
263
+ ``fqp`` is of the form "package.module.klass" or
264
+ "package.module:subobject.klass".
265
+
266
+ Warnings
267
+ --------
268
+ This can import arbitrary modules. Make sure you haven't installed any modules
269
+ that may execute malicious code at import time.
270
+ """
271
+ if ":" in fqp:
272
+ mod, name = fqp.rsplit(":", 1)
273
+ else:
274
+ mod, name = fqp.rsplit(".", 1)
275
+
276
+ is_s3 = mod == "s3fs"
277
+ mod = importlib.import_module(mod)
278
+ if is_s3 and mod.__version__.split(".") < ["0", "5"]:
279
+ warnings.warn(s3_msg)
280
+ for part in name.split("."):
281
+ mod = getattr(mod, part)
282
+
283
+ if not isinstance(mod, type):
284
+ raise TypeError(f"{fqp} is not a class")
285
+
286
+ return mod
287
+
288
+
289
def filesystem(protocol, **storage_options):
    """Instantiate filesystems for given protocol and arguments

    ``storage_options`` are specific to the protocol being chosen, and
    are passed directly to the class.
    """
    if protocol == "arrow_hdfs":
        # Deprecated alias kept for backward compatibility
        warnings.warn(
            "The 'arrow_hdfs' protocol has been deprecated and will be "
            "removed in the future. Specify it as 'hdfs'.",
            DeprecationWarning,
        )

    return get_filesystem_class(protocol)(**storage_options)
304
+
305
+
306
def available_protocols():
    """Return a list of the implemented protocols.

    Note that any given protocol may require extra packages to be
    importable.
    """
    return [*known_implementations]
parrot/lib/python3.10/site-packages/fsspec/spec.py ADDED
@@ -0,0 +1,2068 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import io
4
+ import json
5
+ import logging
6
+ import os
7
+ import threading
8
+ import warnings
9
+ import weakref
10
+ from errno import ESPIPE
11
+ from glob import has_magic
12
+ from hashlib import sha256
13
+ from typing import Any, ClassVar, Dict, Tuple
14
+
15
+ from .callbacks import DEFAULT_CALLBACK
16
+ from .config import apply_config, conf
17
+ from .dircache import DirCache
18
+ from .transaction import Transaction
19
+ from .utils import (
20
+ _unstrip_protocol,
21
+ glob_translate,
22
+ isfilelike,
23
+ other_paths,
24
+ read_block,
25
+ stringify_path,
26
+ tokenize,
27
+ )
28
+
29
+ logger = logging.getLogger("fsspec")
30
+
31
+
32
def make_instance(cls, args, kwargs):
    """Instantiate ``cls`` with stored positional and keyword arguments.

    Module-level so it is picklable; used by ``AbstractFileSystem.__reduce__``
    to recreate filesystem instances on unpickling (which re-hits the
    instance cache).
    """
    return cls(*args, **kwargs)
34
+
35
+
36
+ class _Cached(type):
37
+ """
38
+ Metaclass for caching file system instances.
39
+
40
+ Notes
41
+ -----
42
+ Instances are cached according to
43
+
44
+ * The values of the class attributes listed in `_extra_tokenize_attributes`
45
+ * The arguments passed to ``__init__``.
46
+
47
+ This creates an additional reference to the filesystem, which prevents the
48
+ filesystem from being garbage collected when all *user* references go away.
49
+ A call to the :meth:`AbstractFileSystem.clear_instance_cache` must *also*
50
+ be made for a filesystem instance to be garbage collected.
51
+ """
52
+
53
+ def __init__(cls, *args, **kwargs):
54
+ super().__init__(*args, **kwargs)
55
+ # Note: we intentionally create a reference here, to avoid garbage
56
+ # collecting instances when all other references are gone. To really
57
+ # delete a FileSystem, the cache must be cleared.
58
+ if conf.get("weakref_instance_cache"): # pragma: no cover
59
+ # debug option for analysing fork/spawn conditions
60
+ cls._cache = weakref.WeakValueDictionary()
61
+ else:
62
+ cls._cache = {}
63
+ cls._pid = os.getpid()
64
+
65
+ def __call__(cls, *args, **kwargs):
66
+ kwargs = apply_config(cls, kwargs)
67
+ extra_tokens = tuple(
68
+ getattr(cls, attr, None) for attr in cls._extra_tokenize_attributes
69
+ )
70
+ token = tokenize(
71
+ cls, cls._pid, threading.get_ident(), *args, *extra_tokens, **kwargs
72
+ )
73
+ skip = kwargs.pop("skip_instance_cache", False)
74
+ if os.getpid() != cls._pid:
75
+ cls._cache.clear()
76
+ cls._pid = os.getpid()
77
+ if not skip and cls.cachable and token in cls._cache:
78
+ cls._latest = token
79
+ return cls._cache[token]
80
+ else:
81
+ obj = super().__call__(*args, **kwargs)
82
+ # Setting _fs_token here causes some static linters to complain.
83
+ obj._fs_token_ = token
84
+ obj.storage_args = args
85
+ obj.storage_options = kwargs
86
+ if obj.async_impl and obj.mirror_sync_methods:
87
+ from .asyn import mirror_sync_methods
88
+
89
+ mirror_sync_methods(obj)
90
+
91
+ if cls.cachable and not skip:
92
+ cls._latest = token
93
+ cls._cache[token] = obj
94
+ return obj
95
+
96
+
97
+ class AbstractFileSystem(metaclass=_Cached):
98
+ """
99
+ An abstract super-class for pythonic file-systems
100
+
101
+ Implementations are expected to be compatible with or, better, subclass
102
+ from here.
103
+ """
104
+
105
+ cachable = True # this class can be cached, instances reused
106
+ _cached = False
107
+ blocksize = 2**22
108
+ sep = "/"
109
+ protocol: ClassVar[str | tuple[str, ...]] = "abstract"
110
+ _latest = None
111
+ async_impl = False
112
+ mirror_sync_methods = False
113
+ root_marker = "" # For some FSs, may require leading '/' or other character
114
+ transaction_type = Transaction
115
+
116
+ #: Extra *class attributes* that should be considered when hashing.
117
+ _extra_tokenize_attributes = ()
118
+
119
+ # Set by _Cached metaclass
120
+ storage_args: Tuple[Any, ...]
121
+ storage_options: Dict[str, Any]
122
+
123
+ def __init__(self, *args, **storage_options):
124
+ """Create and configure file-system instance
125
+
126
+ Instances may be cachable, so if similar enough arguments are seen
127
+ a new instance is not required. The token attribute exists to allow
128
+ implementations to cache instances if they wish.
129
+
130
+ A reasonable default should be provided if there are no arguments.
131
+
132
+ Subclasses should call this method.
133
+
134
+ Parameters
135
+ ----------
136
+ use_listings_cache, listings_expiry_time, max_paths:
137
+ passed to ``DirCache``, if the implementation supports
138
+ directory listing caching. Pass use_listings_cache=False
139
+ to disable such caching.
140
+ skip_instance_cache: bool
141
+ If this is a cachable implementation, pass True here to force
142
+ creating a new instance even if a matching instance exists, and prevent
143
+ storing this instance.
144
+ asynchronous: bool
145
+ loop: asyncio-compatible IOLoop or None
146
+ """
147
+ if self._cached:
148
+ # reusing instance, don't change
149
+ return
150
+ self._cached = True
151
+ self._intrans = False
152
+ self._transaction = None
153
+ self._invalidated_caches_in_transaction = []
154
+ self.dircache = DirCache(**storage_options)
155
+
156
+ if storage_options.pop("add_docs", None):
157
+ warnings.warn("add_docs is no longer supported.", FutureWarning)
158
+
159
+ if storage_options.pop("add_aliases", None):
160
+ warnings.warn("add_aliases has been removed.", FutureWarning)
161
+ # This is set in _Cached
162
+ self._fs_token_ = None
163
+
164
+ @property
165
+ def fsid(self):
166
+ """Persistent filesystem id that can be used to compare filesystems
167
+ across sessions.
168
+ """
169
+ raise NotImplementedError
170
+
171
+ @property
172
+ def _fs_token(self):
173
+ return self._fs_token_
174
+
175
+ def __dask_tokenize__(self):
176
+ return self._fs_token
177
+
178
+ def __hash__(self):
179
+ return int(self._fs_token, 16)
180
+
181
+ def __eq__(self, other):
182
+ return isinstance(other, type(self)) and self._fs_token == other._fs_token
183
+
184
+ def __reduce__(self):
185
+ return make_instance, (type(self), self.storage_args, self.storage_options)
186
+
187
+ @classmethod
188
+ def _strip_protocol(cls, path):
189
+ """Turn path from fully-qualified to file-system-specific
190
+
191
+ May require FS-specific handling, e.g., for relative paths or links.
192
+ """
193
+ if isinstance(path, list):
194
+ return [cls._strip_protocol(p) for p in path]
195
+ path = stringify_path(path)
196
+ protos = (cls.protocol,) if isinstance(cls.protocol, str) else cls.protocol
197
+ for protocol in protos:
198
+ if path.startswith(protocol + "://"):
199
+ path = path[len(protocol) + 3 :]
200
+ elif path.startswith(protocol + "::"):
201
+ path = path[len(protocol) + 2 :]
202
+ path = path.rstrip("/")
203
+ # use of root_marker to make minimum required path, e.g., "/"
204
+ return path or cls.root_marker
205
+
206
+ def unstrip_protocol(self, name: str) -> str:
207
+ """Format FS-specific path to generic, including protocol"""
208
+ protos = (self.protocol,) if isinstance(self.protocol, str) else self.protocol
209
+ for protocol in protos:
210
+ if name.startswith(f"{protocol}://"):
211
+ return name
212
+ return f"{protos[0]}://{name}"
213
+
214
+ @staticmethod
215
+ def _get_kwargs_from_urls(path):
216
+ """If kwargs can be encoded in the paths, extract them here
217
+
218
+ This should happen before instantiation of the class; incoming paths
219
+ then should be amended to strip the options in methods.
220
+
221
+ Examples may look like an sftp path "sftp://user@host:/my/path", where
222
+ the user and host should become kwargs and later get stripped.
223
+ """
224
+ # by default, nothing happens
225
+ return {}
226
+
227
+ @classmethod
228
+ def current(cls):
229
+ """Return the most recently instantiated FileSystem
230
+
231
+ If no instance has been created, then create one with defaults
232
+ """
233
+ if cls._latest in cls._cache:
234
+ return cls._cache[cls._latest]
235
+ return cls()
236
+
237
+ @property
238
+ def transaction(self):
239
+ """A context within which files are committed together upon exit
240
+
241
+ Requires the file class to implement `.commit()` and `.discard()`
242
+ for the normal and exception cases.
243
+ """
244
+ if self._transaction is None:
245
+ self._transaction = self.transaction_type(self)
246
+ return self._transaction
247
+
248
+ def start_transaction(self):
249
+ """Begin write transaction for deferring files, non-context version"""
250
+ self._intrans = True
251
+ self._transaction = self.transaction_type(self)
252
+ return self.transaction
253
+
254
+ def end_transaction(self):
255
+ """Finish write transaction, non-context version"""
256
+ self.transaction.complete()
257
+ self._transaction = None
258
+ # The invalid cache must be cleared after the transaction is completed.
259
+ for path in self._invalidated_caches_in_transaction:
260
+ self.invalidate_cache(path)
261
+ self._invalidated_caches_in_transaction.clear()
262
+
263
+ def invalidate_cache(self, path=None):
264
+ """
265
+ Discard any cached directory information
266
+
267
+ Parameters
268
+ ----------
269
+ path: string or None
270
+ If None, clear all listings cached else listings at or under given
271
+ path.
272
+ """
273
+ # Not necessary to implement invalidation mechanism, may have no cache.
274
+ # But if have, you should call this method of parent class from your
275
+ # subclass to ensure expiring caches after transacations correctly.
276
+ # See the implementation of FTPFileSystem in ftp.py
277
+ if self._intrans:
278
+ self._invalidated_caches_in_transaction.append(path)
279
+
280
+ def mkdir(self, path, create_parents=True, **kwargs):
281
+ """
282
+ Create directory entry at path
283
+
284
+ For systems that don't have true directories, may create an for
285
+ this instance only and not touch the real filesystem
286
+
287
+ Parameters
288
+ ----------
289
+ path: str
290
+ location
291
+ create_parents: bool
292
+ if True, this is equivalent to ``makedirs``
293
+ kwargs:
294
+ may be permissions, etc.
295
+ """
296
+ pass # not necessary to implement, may not have directories
297
+
298
+ def makedirs(self, path, exist_ok=False):
299
+ """Recursively make directories
300
+
301
+ Creates directory at path and any intervening required directories.
302
+ Raises exception if, for instance, the path already exists but is a
303
+ file.
304
+
305
+ Parameters
306
+ ----------
307
+ path: str
308
+ leaf directory name
309
+ exist_ok: bool (False)
310
+ If False, will error if the target already exists
311
+ """
312
+ pass # not necessary to implement, may not have directories
313
+
314
+ def rmdir(self, path):
315
+ """Remove a directory, if empty"""
316
+ pass # not necessary to implement, may not have directories
317
+
318
+ def ls(self, path, detail=True, **kwargs):
319
+ """List objects at path.
320
+
321
+ This should include subdirectories and files at that location. The
322
+ difference between a file and a directory must be clear when details
323
+ are requested.
324
+
325
+ The specific keys, or perhaps a FileInfo class, or similar, is TBD,
326
+ but must be consistent across implementations.
327
+ Must include:
328
+
329
+ - full path to the entry (without protocol)
330
+ - size of the entry, in bytes. If the value cannot be determined, will
331
+ be ``None``.
332
+ - type of entry, "file", "directory" or other
333
+
334
+ Additional information
335
+ may be present, appropriate to the file-system, e.g., generation,
336
+ checksum, etc.
337
+
338
+ May use refresh=True|False to allow use of self._ls_from_cache to
339
+ check for a saved listing and avoid calling the backend. This would be
340
+ common where listing may be expensive.
341
+
342
+ Parameters
343
+ ----------
344
+ path: str
345
+ detail: bool
346
+ if True, gives a list of dictionaries, where each is the same as
347
+ the result of ``info(path)``. If False, gives a list of paths
348
+ (str).
349
+ kwargs: may have additional backend-specific options, such as version
350
+ information
351
+
352
+ Returns
353
+ -------
354
+ List of strings if detail is False, or list of directory information
355
+ dicts if detail is True.
356
+ """
357
+ raise NotImplementedError
358
+
359
+ def _ls_from_cache(self, path):
360
+ """Check cache for listing
361
+
362
+ Returns listing, if found (may be empty list for a directly that exists
363
+ but contains nothing), None if not in cache.
364
+ """
365
+ parent = self._parent(path)
366
+ try:
367
+ return self.dircache[path.rstrip("/")]
368
+ except KeyError:
369
+ pass
370
+ try:
371
+ files = [
372
+ f
373
+ for f in self.dircache[parent]
374
+ if f["name"] == path
375
+ or (f["name"] == path.rstrip("/") and f["type"] == "directory")
376
+ ]
377
+ if len(files) == 0:
378
+ # parent dir was listed but did not contain this file
379
+ raise FileNotFoundError(path)
380
+ return files
381
+ except KeyError:
382
+ pass
383
+
384
+ def walk(self, path, maxdepth=None, topdown=True, on_error="omit", **kwargs):
385
+ """Return all files belows path
386
+
387
+ List all files, recursing into subdirectories; output is iterator-style,
388
+ like ``os.walk()``. For a simple list of files, ``find()`` is available.
389
+
390
+ When topdown is True, the caller can modify the dirnames list in-place (perhaps
391
+ using del or slice assignment), and walk() will
392
+ only recurse into the subdirectories whose names remain in dirnames;
393
+ this can be used to prune the search, impose a specific order of visiting,
394
+ or even to inform walk() about directories the caller creates or renames before
395
+ it resumes walk() again.
396
+ Modifying dirnames when topdown is False has no effect. (see os.walk)
397
+
398
+ Note that the "files" outputted will include anything that is not
399
+ a directory, such as links.
400
+
401
+ Parameters
402
+ ----------
403
+ path: str
404
+ Root to recurse into
405
+ maxdepth: int
406
+ Maximum recursion depth. None means limitless, but not recommended
407
+ on link-based file-systems.
408
+ topdown: bool (True)
409
+ Whether to walk the directory tree from the top downwards or from
410
+ the bottom upwards.
411
+ on_error: "omit", "raise", a collable
412
+ if omit (default), path with exception will simply be empty;
413
+ If raise, an underlying exception will be raised;
414
+ if callable, it will be called with a single OSError instance as argument
415
+ kwargs: passed to ``ls``
416
+ """
417
+ if maxdepth is not None and maxdepth < 1:
418
+ raise ValueError("maxdepth must be at least 1")
419
+
420
+ path = self._strip_protocol(path)
421
+ full_dirs = {}
422
+ dirs = {}
423
+ files = {}
424
+
425
+ detail = kwargs.pop("detail", False)
426
+ try:
427
+ listing = self.ls(path, detail=True, **kwargs)
428
+ except (FileNotFoundError, OSError) as e:
429
+ if on_error == "raise":
430
+ raise
431
+ elif callable(on_error):
432
+ on_error(e)
433
+ if detail:
434
+ return path, {}, {}
435
+ return path, [], []
436
+
437
+ for info in listing:
438
+ # each info name must be at least [path]/part , but here
439
+ # we check also for names like [path]/part/
440
+ pathname = info["name"].rstrip("/")
441
+ name = pathname.rsplit("/", 1)[-1]
442
+ if info["type"] == "directory" and pathname != path:
443
+ # do not include "self" path
444
+ full_dirs[name] = pathname
445
+ dirs[name] = info
446
+ elif pathname == path:
447
+ # file-like with same name as give path
448
+ files[""] = info
449
+ else:
450
+ files[name] = info
451
+
452
+ if not detail:
453
+ dirs = list(dirs)
454
+ files = list(files)
455
+
456
+ if topdown:
457
+ # Yield before recursion if walking top down
458
+ yield path, dirs, files
459
+
460
+ if maxdepth is not None:
461
+ maxdepth -= 1
462
+ if maxdepth < 1:
463
+ if not topdown:
464
+ yield path, dirs, files
465
+ return
466
+
467
+ for d in dirs:
468
+ yield from self.walk(
469
+ full_dirs[d],
470
+ maxdepth=maxdepth,
471
+ detail=detail,
472
+ topdown=topdown,
473
+ **kwargs,
474
+ )
475
+
476
+ if not topdown:
477
+ # Yield after recursion if walking bottom up
478
+ yield path, dirs, files
479
+
480
+ def find(self, path, maxdepth=None, withdirs=False, detail=False, **kwargs):
481
+ """List all files below path.
482
+
483
+ Like posix ``find`` command without conditions
484
+
485
+ Parameters
486
+ ----------
487
+ path : str
488
+ maxdepth: int or None
489
+ If not None, the maximum number of levels to descend
490
+ withdirs: bool
491
+ Whether to include directory paths in the output. This is True
492
+ when used by glob, but users usually only want files.
493
+ kwargs are passed to ``ls``.
494
+ """
495
+ # TODO: allow equivalent of -name parameter
496
+ path = self._strip_protocol(path)
497
+ out = {}
498
+
499
+ # Add the root directory if withdirs is requested
500
+ # This is needed for posix glob compliance
501
+ if withdirs and path != "" and self.isdir(path):
502
+ out[path] = self.info(path)
503
+
504
+ for _, dirs, files in self.walk(path, maxdepth, detail=True, **kwargs):
505
+ if withdirs:
506
+ files.update(dirs)
507
+ out.update({info["name"]: info for name, info in files.items()})
508
+ if not out and self.isfile(path):
509
+ # walk works on directories, but find should also return [path]
510
+ # when path happens to be a file
511
+ out[path] = {}
512
+ names = sorted(out)
513
+ if not detail:
514
+ return names
515
+ else:
516
+ return {name: out[name] for name in names}
517
+
518
+ def du(self, path, total=True, maxdepth=None, withdirs=False, **kwargs):
519
+ """Space used by files and optionally directories within a path
520
+
521
+ Directory size does not include the size of its contents.
522
+
523
+ Parameters
524
+ ----------
525
+ path: str
526
+ total: bool
527
+ Whether to sum all the file sizes
528
+ maxdepth: int or None
529
+ Maximum number of directory levels to descend, None for unlimited.
530
+ withdirs: bool
531
+ Whether to include directory paths in the output.
532
+ kwargs: passed to ``find``
533
+
534
+ Returns
535
+ -------
536
+ Dict of {path: size} if total=False, or int otherwise, where numbers
537
+ refer to bytes used.
538
+ """
539
+ sizes = {}
540
+ if withdirs and self.isdir(path):
541
+ # Include top-level directory in output
542
+ info = self.info(path)
543
+ sizes[info["name"]] = info["size"]
544
+ for f in self.find(path, maxdepth=maxdepth, withdirs=withdirs, **kwargs):
545
+ info = self.info(f)
546
+ sizes[info["name"]] = info["size"]
547
+ if total:
548
+ return sum(sizes.values())
549
+ else:
550
+ return sizes
551
+
552
+ def glob(self, path, maxdepth=None, **kwargs):
553
+ """
554
+ Find files by glob-matching.
555
+
556
+ If the path ends with '/', only folders are returned.
557
+
558
+ We support ``"**"``,
559
+ ``"?"`` and ``"[..]"``. We do not support ^ for pattern negation.
560
+
561
+ The `maxdepth` option is applied on the first `**` found in the path.
562
+
563
+ kwargs are passed to ``ls``.
564
+ """
565
+ if maxdepth is not None and maxdepth < 1:
566
+ raise ValueError("maxdepth must be at least 1")
567
+
568
+ import re
569
+
570
+ seps = (os.path.sep, os.path.altsep) if os.path.altsep else (os.path.sep,)
571
+ ends_with_sep = path.endswith(seps) # _strip_protocol strips trailing slash
572
+ path = self._strip_protocol(path)
573
+ append_slash_to_dirname = ends_with_sep or path.endswith(
574
+ tuple(sep + "**" for sep in seps)
575
+ )
576
+ idx_star = path.find("*") if path.find("*") >= 0 else len(path)
577
+ idx_qmark = path.find("?") if path.find("?") >= 0 else len(path)
578
+ idx_brace = path.find("[") if path.find("[") >= 0 else len(path)
579
+
580
+ min_idx = min(idx_star, idx_qmark, idx_brace)
581
+
582
+ detail = kwargs.pop("detail", False)
583
+
584
+ if not has_magic(path):
585
+ if self.exists(path, **kwargs):
586
+ if not detail:
587
+ return [path]
588
+ else:
589
+ return {path: self.info(path, **kwargs)}
590
+ else:
591
+ if not detail:
592
+ return [] # glob of non-existent returns empty
593
+ else:
594
+ return {}
595
+ elif "/" in path[:min_idx]:
596
+ min_idx = path[:min_idx].rindex("/")
597
+ root = path[: min_idx + 1]
598
+ depth = path[min_idx + 1 :].count("/") + 1
599
+ else:
600
+ root = ""
601
+ depth = path[min_idx + 1 :].count("/") + 1
602
+
603
+ if "**" in path:
604
+ if maxdepth is not None:
605
+ idx_double_stars = path.find("**")
606
+ depth_double_stars = path[idx_double_stars:].count("/") + 1
607
+ depth = depth - depth_double_stars + maxdepth
608
+ else:
609
+ depth = None
610
+
611
+ allpaths = self.find(root, maxdepth=depth, withdirs=True, detail=True, **kwargs)
612
+
613
+ pattern = glob_translate(path + ("/" if ends_with_sep else ""))
614
+ pattern = re.compile(pattern)
615
+
616
+ out = {
617
+ p: info
618
+ for p, info in sorted(allpaths.items())
619
+ if pattern.match(
620
+ (
621
+ p + "/"
622
+ if append_slash_to_dirname and info["type"] == "directory"
623
+ else p
624
+ )
625
+ )
626
+ }
627
+
628
+ if detail:
629
+ return out
630
+ else:
631
+ return list(out)
632
+
633
+ def exists(self, path, **kwargs):
634
+ """Is there a file at the given path"""
635
+ try:
636
+ self.info(path, **kwargs)
637
+ return True
638
+ except: # noqa: E722
639
+ # any exception allowed bar FileNotFoundError?
640
+ return False
641
+
642
+ def lexists(self, path, **kwargs):
643
+ """If there is a file at the given path (including
644
+ broken links)"""
645
+ return self.exists(path)
646
+
647
+ def info(self, path, **kwargs):
648
+ """Give details of entry at path
649
+
650
+ Returns a single dictionary, with exactly the same information as ``ls``
651
+ would with ``detail=True``.
652
+
653
+ The default implementation should calls ls and could be overridden by a
654
+ shortcut. kwargs are passed on to ```ls()``.
655
+
656
+ Some file systems might not be able to measure the file's size, in
657
+ which case, the returned dict will include ``'size': None``.
658
+
659
+ Returns
660
+ -------
661
+ dict with keys: name (full path in the FS), size (in bytes), type (file,
662
+ directory, or something else) and other FS-specific keys.
663
+ """
664
+ path = self._strip_protocol(path)
665
+ out = self.ls(self._parent(path), detail=True, **kwargs)
666
+ out = [o for o in out if o["name"].rstrip("/") == path]
667
+ if out:
668
+ return out[0]
669
+ out = self.ls(path, detail=True, **kwargs)
670
+ path = path.rstrip("/")
671
+ out1 = [o for o in out if o["name"].rstrip("/") == path]
672
+ if len(out1) == 1:
673
+ if "size" not in out1[0]:
674
+ out1[0]["size"] = None
675
+ return out1[0]
676
+ elif len(out1) > 1 or out:
677
+ return {"name": path, "size": 0, "type": "directory"}
678
+ else:
679
+ raise FileNotFoundError(path)
680
+
681
+ def checksum(self, path):
682
+ """Unique value for current version of file
683
+
684
+ If the checksum is the same from one moment to another, the contents
685
+ are guaranteed to be the same. If the checksum changes, the contents
686
+ *might* have changed.
687
+
688
+ This should normally be overridden; default will probably capture
689
+ creation/modification timestamp (which would be good) or maybe
690
+ access timestamp (which would be bad)
691
+ """
692
+ return int(tokenize(self.info(path)), 16)
693
+
694
+ def size(self, path):
695
+ """Size in bytes of file"""
696
+ return self.info(path).get("size", None)
697
+
698
+ def sizes(self, paths):
699
+ """Size in bytes of each file in a list of paths"""
700
+ return [self.size(p) for p in paths]
701
+
702
+ def isdir(self, path):
703
+ """Is this entry directory-like?"""
704
+ try:
705
+ return self.info(path)["type"] == "directory"
706
+ except OSError:
707
+ return False
708
+
709
+ def isfile(self, path):
710
+ """Is this entry file-like?"""
711
+ try:
712
+ return self.info(path)["type"] == "file"
713
+ except: # noqa: E722
714
+ return False
715
+
716
+ def read_text(self, path, encoding=None, errors=None, newline=None, **kwargs):
717
+ """Get the contents of the file as a string.
718
+
719
+ Parameters
720
+ ----------
721
+ path: str
722
+ URL of file on this filesystems
723
+ encoding, errors, newline: same as `open`.
724
+ """
725
+ with self.open(
726
+ path,
727
+ mode="r",
728
+ encoding=encoding,
729
+ errors=errors,
730
+ newline=newline,
731
+ **kwargs,
732
+ ) as f:
733
+ return f.read()
734
+
735
+ def write_text(
736
+ self, path, value, encoding=None, errors=None, newline=None, **kwargs
737
+ ):
738
+ """Write the text to the given file.
739
+
740
+ An existing file will be overwritten.
741
+
742
+ Parameters
743
+ ----------
744
+ path: str
745
+ URL of file on this filesystems
746
+ value: str
747
+ Text to write.
748
+ encoding, errors, newline: same as `open`.
749
+ """
750
+ with self.open(
751
+ path,
752
+ mode="w",
753
+ encoding=encoding,
754
+ errors=errors,
755
+ newline=newline,
756
+ **kwargs,
757
+ ) as f:
758
+ return f.write(value)
759
+
760
+ def cat_file(self, path, start=None, end=None, **kwargs):
761
+ """Get the content of a file
762
+
763
+ Parameters
764
+ ----------
765
+ path: URL of file on this filesystems
766
+ start, end: int
767
+ Bytes limits of the read. If negative, backwards from end,
768
+ like usual python slices. Either can be None for start or
769
+ end of file, respectively
770
+ kwargs: passed to ``open()``.
771
+ """
772
+ # explicitly set buffering off?
773
+ with self.open(path, "rb", **kwargs) as f:
774
+ if start is not None:
775
+ if start >= 0:
776
+ f.seek(start)
777
+ else:
778
+ f.seek(max(0, f.size + start))
779
+ if end is not None:
780
+ if end < 0:
781
+ end = f.size + end
782
+ return f.read(end - f.tell())
783
+ return f.read()
784
+
785
+ def pipe_file(self, path, value, **kwargs):
786
+ """Set the bytes of given file"""
787
+ with self.open(path, "wb", **kwargs) as f:
788
+ f.write(value)
789
+
790
+ def pipe(self, path, value=None, **kwargs):
791
+ """Put value into path
792
+
793
+ (counterpart to ``cat``)
794
+
795
+ Parameters
796
+ ----------
797
+ path: string or dict(str, bytes)
798
+ If a string, a single remote location to put ``value`` bytes; if a dict,
799
+ a mapping of {path: bytesvalue}.
800
+ value: bytes, optional
801
+ If using a single path, these are the bytes to put there. Ignored if
802
+ ``path`` is a dict
803
+ """
804
+ if isinstance(path, str):
805
+ self.pipe_file(self._strip_protocol(path), value, **kwargs)
806
+ elif isinstance(path, dict):
807
+ for k, v in path.items():
808
+ self.pipe_file(self._strip_protocol(k), v, **kwargs)
809
+ else:
810
+ raise ValueError("path must be str or dict")
811
+
812
+ def cat_ranges(
813
+ self, paths, starts, ends, max_gap=None, on_error="return", **kwargs
814
+ ):
815
+ """Get the contents of byte ranges from one or more files
816
+
817
+ Parameters
818
+ ----------
819
+ paths: list
820
+ A list of of filepaths on this filesystems
821
+ starts, ends: int or list
822
+ Bytes limits of the read. If using a single int, the same value will be
823
+ used to read all the specified files.
824
+ """
825
+ if max_gap is not None:
826
+ raise NotImplementedError
827
+ if not isinstance(paths, list):
828
+ raise TypeError
829
+ if not isinstance(starts, list):
830
+ starts = [starts] * len(paths)
831
+ if not isinstance(ends, list):
832
+ ends = [ends] * len(paths)
833
+ if len(starts) != len(paths) or len(ends) != len(paths):
834
+ raise ValueError
835
+ out = []
836
+ for p, s, e in zip(paths, starts, ends):
837
+ try:
838
+ out.append(self.cat_file(p, s, e))
839
+ except Exception as e:
840
+ if on_error == "return":
841
+ out.append(e)
842
+ else:
843
+ raise
844
+ return out
845
+
846
+ def cat(self, path, recursive=False, on_error="raise", **kwargs):
847
+ """Fetch (potentially multiple) paths' contents
848
+
849
+ Parameters
850
+ ----------
851
+ recursive: bool
852
+ If True, assume the path(s) are directories, and get all the
853
+ contained files
854
+ on_error : "raise", "omit", "return"
855
+ If raise, an underlying exception will be raised (converted to KeyError
856
+ if the type is in self.missing_exceptions); if omit, keys with exception
857
+ will simply not be included in the output; if "return", all keys are
858
+ included in the output, but the value will be bytes or an exception
859
+ instance.
860
+ kwargs: passed to cat_file
861
+
862
+ Returns
863
+ -------
864
+ dict of {path: contents} if there are multiple paths
865
+ or the path has been otherwise expanded
866
+ """
867
+ paths = self.expand_path(path, recursive=recursive)
868
+ if (
869
+ len(paths) > 1
870
+ or isinstance(path, list)
871
+ or paths[0] != self._strip_protocol(path)
872
+ ):
873
+ out = {}
874
+ for path in paths:
875
+ try:
876
+ out[path] = self.cat_file(path, **kwargs)
877
+ except Exception as e:
878
+ if on_error == "raise":
879
+ raise
880
+ if on_error == "return":
881
+ out[path] = e
882
+ return out
883
+ else:
884
+ return self.cat_file(paths[0], **kwargs)
885
+
886
+ def get_file(self, rpath, lpath, callback=DEFAULT_CALLBACK, outfile=None, **kwargs):
887
+ """Copy single remote file to local"""
888
+ from .implementations.local import LocalFileSystem
889
+
890
+ if isfilelike(lpath):
891
+ outfile = lpath
892
+ elif self.isdir(rpath):
893
+ os.makedirs(lpath, exist_ok=True)
894
+ return None
895
+
896
+ fs = LocalFileSystem(auto_mkdir=True)
897
+ fs.makedirs(fs._parent(lpath), exist_ok=True)
898
+
899
+ with self.open(rpath, "rb", **kwargs) as f1:
900
+ if outfile is None:
901
+ outfile = open(lpath, "wb")
902
+
903
+ try:
904
+ callback.set_size(getattr(f1, "size", None))
905
+ data = True
906
+ while data:
907
+ data = f1.read(self.blocksize)
908
+ segment_len = outfile.write(data)
909
+ if segment_len is None:
910
+ segment_len = len(data)
911
+ callback.relative_update(segment_len)
912
+ finally:
913
+ if not isfilelike(lpath):
914
+ outfile.close()
915
+
916
+ def get(
917
+ self,
918
+ rpath,
919
+ lpath,
920
+ recursive=False,
921
+ callback=DEFAULT_CALLBACK,
922
+ maxdepth=None,
923
+ **kwargs,
924
+ ):
925
+ """Copy file(s) to local.
926
+
927
+ Copies a specific file or tree of files (if recursive=True). If lpath
928
+ ends with a "/", it will be assumed to be a directory, and target files
929
+ will go within. Can submit a list of paths, which may be glob-patterns
930
+ and will be expanded.
931
+
932
+ Calls get_file for each source.
933
+ """
934
+ if isinstance(lpath, list) and isinstance(rpath, list):
935
+ # No need to expand paths when both source and destination
936
+ # are provided as lists
937
+ rpaths = rpath
938
+ lpaths = lpath
939
+ else:
940
+ from .implementations.local import (
941
+ LocalFileSystem,
942
+ make_path_posix,
943
+ trailing_sep,
944
+ )
945
+
946
+ source_is_str = isinstance(rpath, str)
947
+ rpaths = self.expand_path(rpath, recursive=recursive, maxdepth=maxdepth)
948
+ if source_is_str and (not recursive or maxdepth is not None):
949
+ # Non-recursive glob does not copy directories
950
+ rpaths = [p for p in rpaths if not (trailing_sep(p) or self.isdir(p))]
951
+ if not rpaths:
952
+ return
953
+
954
+ if isinstance(lpath, str):
955
+ lpath = make_path_posix(lpath)
956
+
957
+ source_is_file = len(rpaths) == 1
958
+ dest_is_dir = isinstance(lpath, str) and (
959
+ trailing_sep(lpath) or LocalFileSystem().isdir(lpath)
960
+ )
961
+
962
+ exists = source_is_str and (
963
+ (has_magic(rpath) and source_is_file)
964
+ or (not has_magic(rpath) and dest_is_dir and not trailing_sep(rpath))
965
+ )
966
+ lpaths = other_paths(
967
+ rpaths,
968
+ lpath,
969
+ exists=exists,
970
+ flatten=not source_is_str,
971
+ )
972
+
973
+ callback.set_size(len(lpaths))
974
+ for lpath, rpath in callback.wrap(zip(lpaths, rpaths)):
975
+ with callback.branched(rpath, lpath) as child:
976
+ self.get_file(rpath, lpath, callback=child, **kwargs)
977
+
978
+ def put_file(self, lpath, rpath, callback=DEFAULT_CALLBACK, **kwargs):
979
+ """Copy single file to remote"""
980
+ if os.path.isdir(lpath):
981
+ self.makedirs(rpath, exist_ok=True)
982
+ return None
983
+
984
+ with open(lpath, "rb") as f1:
985
+ size = f1.seek(0, 2)
986
+ callback.set_size(size)
987
+ f1.seek(0)
988
+
989
+ self.mkdirs(self._parent(os.fspath(rpath)), exist_ok=True)
990
+ with self.open(rpath, "wb", **kwargs) as f2:
991
+ while f1.tell() < size:
992
+ data = f1.read(self.blocksize)
993
+ segment_len = f2.write(data)
994
+ if segment_len is None:
995
+ segment_len = len(data)
996
+ callback.relative_update(segment_len)
997
+
998
+ def put(
999
+ self,
1000
+ lpath,
1001
+ rpath,
1002
+ recursive=False,
1003
+ callback=DEFAULT_CALLBACK,
1004
+ maxdepth=None,
1005
+ **kwargs,
1006
+ ):
1007
+ """Copy file(s) from local.
1008
+
1009
+ Copies a specific file or tree of files (if recursive=True). If rpath
1010
+ ends with a "/", it will be assumed to be a directory, and target files
1011
+ will go within.
1012
+
1013
+ Calls put_file for each source.
1014
+ """
1015
+ if isinstance(lpath, list) and isinstance(rpath, list):
1016
+ # No need to expand paths when both source and destination
1017
+ # are provided as lists
1018
+ rpaths = rpath
1019
+ lpaths = lpath
1020
+ else:
1021
+ from .implementations.local import (
1022
+ LocalFileSystem,
1023
+ make_path_posix,
1024
+ trailing_sep,
1025
+ )
1026
+
1027
+ source_is_str = isinstance(lpath, str)
1028
+ if source_is_str:
1029
+ lpath = make_path_posix(lpath)
1030
+ fs = LocalFileSystem()
1031
+ lpaths = fs.expand_path(lpath, recursive=recursive, maxdepth=maxdepth)
1032
+ if source_is_str and (not recursive or maxdepth is not None):
1033
+ # Non-recursive glob does not copy directories
1034
+ lpaths = [p for p in lpaths if not (trailing_sep(p) or fs.isdir(p))]
1035
+ if not lpaths:
1036
+ return
1037
+
1038
+ source_is_file = len(lpaths) == 1
1039
+ dest_is_dir = isinstance(rpath, str) and (
1040
+ trailing_sep(rpath) or self.isdir(rpath)
1041
+ )
1042
+
1043
+ rpath = (
1044
+ self._strip_protocol(rpath)
1045
+ if isinstance(rpath, str)
1046
+ else [self._strip_protocol(p) for p in rpath]
1047
+ )
1048
+ exists = source_is_str and (
1049
+ (has_magic(lpath) and source_is_file)
1050
+ or (not has_magic(lpath) and dest_is_dir and not trailing_sep(lpath))
1051
+ )
1052
+ rpaths = other_paths(
1053
+ lpaths,
1054
+ rpath,
1055
+ exists=exists,
1056
+ flatten=not source_is_str,
1057
+ )
1058
+
1059
+ callback.set_size(len(rpaths))
1060
+ for lpath, rpath in callback.wrap(zip(lpaths, rpaths)):
1061
+ with callback.branched(lpath, rpath) as child:
1062
+ self.put_file(lpath, rpath, callback=child, **kwargs)
1063
+
1064
+ def head(self, path, size=1024):
1065
+ """Get the first ``size`` bytes from file"""
1066
+ with self.open(path, "rb") as f:
1067
+ return f.read(size)
1068
+
1069
+ def tail(self, path, size=1024):
1070
+ """Get the last ``size`` bytes from file"""
1071
+ with self.open(path, "rb") as f:
1072
+ f.seek(max(-size, -f.size), 2)
1073
+ return f.read()
1074
+
1075
+ def cp_file(self, path1, path2, **kwargs):
1076
+ raise NotImplementedError
1077
+
1078
+ def copy(
1079
+ self, path1, path2, recursive=False, maxdepth=None, on_error=None, **kwargs
1080
+ ):
1081
+ """Copy within two locations in the filesystem
1082
+
1083
+ on_error : "raise", "ignore"
1084
+ If raise, any not-found exceptions will be raised; if ignore any
1085
+ not-found exceptions will cause the path to be skipped; defaults to
1086
+ raise unless recursive is true, where the default is ignore
1087
+ """
1088
+ if on_error is None and recursive:
1089
+ on_error = "ignore"
1090
+ elif on_error is None:
1091
+ on_error = "raise"
1092
+
1093
+ if isinstance(path1, list) and isinstance(path2, list):
1094
+ # No need to expand paths when both source and destination
1095
+ # are provided as lists
1096
+ paths1 = path1
1097
+ paths2 = path2
1098
+ else:
1099
+ from .implementations.local import trailing_sep
1100
+
1101
+ source_is_str = isinstance(path1, str)
1102
+ paths1 = self.expand_path(path1, recursive=recursive, maxdepth=maxdepth)
1103
+ if source_is_str and (not recursive or maxdepth is not None):
1104
+ # Non-recursive glob does not copy directories
1105
+ paths1 = [p for p in paths1 if not (trailing_sep(p) or self.isdir(p))]
1106
+ if not paths1:
1107
+ return
1108
+
1109
+ source_is_file = len(paths1) == 1
1110
+ dest_is_dir = isinstance(path2, str) and (
1111
+ trailing_sep(path2) or self.isdir(path2)
1112
+ )
1113
+
1114
+ exists = source_is_str and (
1115
+ (has_magic(path1) and source_is_file)
1116
+ or (not has_magic(path1) and dest_is_dir and not trailing_sep(path1))
1117
+ )
1118
+ paths2 = other_paths(
1119
+ paths1,
1120
+ path2,
1121
+ exists=exists,
1122
+ flatten=not source_is_str,
1123
+ )
1124
+
1125
+ for p1, p2 in zip(paths1, paths2):
1126
+ try:
1127
+ self.cp_file(p1, p2, **kwargs)
1128
+ except FileNotFoundError:
1129
+ if on_error == "raise":
1130
+ raise
1131
+
1132
+ def expand_path(self, path, recursive=False, maxdepth=None, **kwargs):
1133
+ """Turn one or more globs or directories into a list of all matching paths
1134
+ to files or directories.
1135
+
1136
+ kwargs are passed to ``glob`` or ``find``, which may in turn call ``ls``
1137
+ """
1138
+
1139
+ if maxdepth is not None and maxdepth < 1:
1140
+ raise ValueError("maxdepth must be at least 1")
1141
+
1142
+ if isinstance(path, (str, os.PathLike)):
1143
+ out = self.expand_path([path], recursive, maxdepth)
1144
+ else:
1145
+ out = set()
1146
+ path = [self._strip_protocol(p) for p in path]
1147
+ for p in path:
1148
+ if has_magic(p):
1149
+ bit = set(self.glob(p, maxdepth=maxdepth, **kwargs))
1150
+ out |= bit
1151
+ if recursive:
1152
+ # glob call above expanded one depth so if maxdepth is defined
1153
+ # then decrement it in expand_path call below. If it is zero
1154
+ # after decrementing then avoid expand_path call.
1155
+ if maxdepth is not None and maxdepth <= 1:
1156
+ continue
1157
+ out |= set(
1158
+ self.expand_path(
1159
+ list(bit),
1160
+ recursive=recursive,
1161
+ maxdepth=maxdepth - 1 if maxdepth is not None else None,
1162
+ **kwargs,
1163
+ )
1164
+ )
1165
+ continue
1166
+ elif recursive:
1167
+ rec = set(
1168
+ self.find(
1169
+ p, maxdepth=maxdepth, withdirs=True, detail=False, **kwargs
1170
+ )
1171
+ )
1172
+ out |= rec
1173
+ if p not in out and (recursive is False or self.exists(p)):
1174
+ # should only check once, for the root
1175
+ out.add(p)
1176
+ if not out:
1177
+ raise FileNotFoundError(path)
1178
+ return sorted(out)
1179
+
1180
+ def mv(self, path1, path2, recursive=False, maxdepth=None, **kwargs):
1181
+ """Move file(s) from one location to another"""
1182
+ if path1 == path2:
1183
+ logger.debug("%s mv: The paths are the same, so no files were moved.", self)
1184
+ else:
1185
+ # explicitly raise exception to prevent data corruption
1186
+ self.copy(
1187
+ path1, path2, recursive=recursive, maxdepth=maxdepth, onerror="raise"
1188
+ )
1189
+ self.rm(path1, recursive=recursive)
1190
+
1191
+ def rm_file(self, path):
1192
+ """Delete a file"""
1193
+ self._rm(path)
1194
+
1195
+ def _rm(self, path):
1196
+ """Delete one file"""
1197
+ # this is the old name for the method, prefer rm_file
1198
+ raise NotImplementedError
1199
+
1200
+ def rm(self, path, recursive=False, maxdepth=None):
1201
+ """Delete files.
1202
+
1203
+ Parameters
1204
+ ----------
1205
+ path: str or list of str
1206
+ File(s) to delete.
1207
+ recursive: bool
1208
+ If file(s) are directories, recursively delete contents and then
1209
+ also remove the directory
1210
+ maxdepth: int or None
1211
+ Depth to pass to walk for finding files to delete, if recursive.
1212
+ If None, there will be no limit and infinite recursion may be
1213
+ possible.
1214
+ """
1215
+ path = self.expand_path(path, recursive=recursive, maxdepth=maxdepth)
1216
+ for p in reversed(path):
1217
+ self.rm_file(p)
1218
+
1219
+ @classmethod
1220
+ def _parent(cls, path):
1221
+ path = cls._strip_protocol(path)
1222
+ if "/" in path:
1223
+ parent = path.rsplit("/", 1)[0].lstrip(cls.root_marker)
1224
+ return cls.root_marker + parent
1225
+ else:
1226
+ return cls.root_marker
1227
+
1228
+ def _open(
1229
+ self,
1230
+ path,
1231
+ mode="rb",
1232
+ block_size=None,
1233
+ autocommit=True,
1234
+ cache_options=None,
1235
+ **kwargs,
1236
+ ):
1237
+ """Return raw bytes-mode file-like from the file-system"""
1238
+ return AbstractBufferedFile(
1239
+ self,
1240
+ path,
1241
+ mode,
1242
+ block_size,
1243
+ autocommit,
1244
+ cache_options=cache_options,
1245
+ **kwargs,
1246
+ )
1247
+
1248
+ def open(
1249
+ self,
1250
+ path,
1251
+ mode="rb",
1252
+ block_size=None,
1253
+ cache_options=None,
1254
+ compression=None,
1255
+ **kwargs,
1256
+ ):
1257
+ """
1258
+ Return a file-like object from the filesystem
1259
+
1260
+ The resultant instance must function correctly in a context ``with``
1261
+ block.
1262
+
1263
+ Parameters
1264
+ ----------
1265
+ path: str
1266
+ Target file
1267
+ mode: str like 'rb', 'w'
1268
+ See builtin ``open()``
1269
+ block_size: int
1270
+ Some indication of buffering - this is a value in bytes
1271
+ cache_options : dict, optional
1272
+ Extra arguments to pass through to the cache.
1273
+ compression: string or None
1274
+ If given, open file using compression codec. Can either be a compression
1275
+ name (a key in ``fsspec.compression.compr``) or "infer" to guess the
1276
+ compression from the filename suffix.
1277
+ encoding, errors, newline: passed on to TextIOWrapper for text mode
1278
+ """
1279
+ import io
1280
+
1281
+ path = self._strip_protocol(path)
1282
+ if "b" not in mode:
1283
+ mode = mode.replace("t", "") + "b"
1284
+
1285
+ text_kwargs = {
1286
+ k: kwargs.pop(k)
1287
+ for k in ["encoding", "errors", "newline"]
1288
+ if k in kwargs
1289
+ }
1290
+ return io.TextIOWrapper(
1291
+ self.open(
1292
+ path,
1293
+ mode,
1294
+ block_size=block_size,
1295
+ cache_options=cache_options,
1296
+ compression=compression,
1297
+ **kwargs,
1298
+ ),
1299
+ **text_kwargs,
1300
+ )
1301
+ else:
1302
+ ac = kwargs.pop("autocommit", not self._intrans)
1303
+ f = self._open(
1304
+ path,
1305
+ mode=mode,
1306
+ block_size=block_size,
1307
+ autocommit=ac,
1308
+ cache_options=cache_options,
1309
+ **kwargs,
1310
+ )
1311
+ if compression is not None:
1312
+ from fsspec.compression import compr
1313
+ from fsspec.core import get_compression
1314
+
1315
+ compression = get_compression(path, compression)
1316
+ compress = compr[compression]
1317
+ f = compress(f, mode=mode[0])
1318
+
1319
+ if not ac and "r" not in mode:
1320
+ self.transaction.files.append(f)
1321
+ return f
1322
+
1323
+ def touch(self, path, truncate=True, **kwargs):
1324
+ """Create empty file, or update timestamp
1325
+
1326
+ Parameters
1327
+ ----------
1328
+ path: str
1329
+ file location
1330
+ truncate: bool
1331
+ If True, always set file size to 0; if False, update timestamp and
1332
+ leave file unchanged, if backend allows this
1333
+ """
1334
+ if truncate or not self.exists(path):
1335
+ with self.open(path, "wb", **kwargs):
1336
+ pass
1337
+ else:
1338
+ raise NotImplementedError # update timestamp, if possible
1339
+
1340
+ def ukey(self, path):
1341
+ """Hash of file properties, to tell if it has changed"""
1342
+ return sha256(str(self.info(path)).encode()).hexdigest()
1343
+
1344
+ def read_block(self, fn, offset, length, delimiter=None):
1345
+ """Read a block of bytes from
1346
+
1347
+ Starting at ``offset`` of the file, read ``length`` bytes. If
1348
+ ``delimiter`` is set then we ensure that the read starts and stops at
1349
+ delimiter boundaries that follow the locations ``offset`` and ``offset
1350
+ + length``. If ``offset`` is zero then we start at zero. The
1351
+ bytestring returned WILL include the end delimiter string.
1352
+
1353
+ If offset+length is beyond the eof, reads to eof.
1354
+
1355
+ Parameters
1356
+ ----------
1357
+ fn: string
1358
+ Path to filename
1359
+ offset: int
1360
+ Byte offset to start read
1361
+ length: int
1362
+ Number of bytes to read. If None, read to end.
1363
+ delimiter: bytes (optional)
1364
+ Ensure reading starts and stops at delimiter bytestring
1365
+
1366
+ Examples
1367
+ --------
1368
+ >>> fs.read_block('data/file.csv', 0, 13) # doctest: +SKIP
1369
+ b'Alice, 100\\nBo'
1370
+ >>> fs.read_block('data/file.csv', 0, 13, delimiter=b'\\n') # doctest: +SKIP
1371
+ b'Alice, 100\\nBob, 200\\n'
1372
+
1373
+ Use ``length=None`` to read to the end of the file.
1374
+ >>> fs.read_block('data/file.csv', 0, None, delimiter=b'\\n') # doctest: +SKIP
1375
+ b'Alice, 100\\nBob, 200\\nCharlie, 300'
1376
+
1377
+ See Also
1378
+ --------
1379
+ :func:`fsspec.utils.read_block`
1380
+ """
1381
+ with self.open(fn, "rb") as f:
1382
+ size = f.size
1383
+ if length is None:
1384
+ length = size
1385
+ if size is not None and offset + length > size:
1386
+ length = size - offset
1387
+ return read_block(f, offset, length, delimiter)
1388
+
1389
+ def to_json(self, *, include_password: bool = True) -> str:
1390
+ """
1391
+ JSON representation of this filesystem instance.
1392
+
1393
+ Parameters
1394
+ ----------
1395
+ include_password: bool, default True
1396
+ Whether to include the password (if any) in the output.
1397
+
1398
+ Returns
1399
+ -------
1400
+ JSON string with keys ``cls`` (the python location of this class),
1401
+ protocol (text name of this class's protocol, first one in case of
1402
+ multiple), ``args`` (positional args, usually empty), and all other
1403
+ keyword arguments as their own keys.
1404
+
1405
+ Warnings
1406
+ --------
1407
+ Serialized filesystems may contain sensitive information which have been
1408
+ passed to the constructor, such as passwords and tokens. Make sure you
1409
+ store and send them in a secure environment!
1410
+ """
1411
+ from .json import FilesystemJSONEncoder
1412
+
1413
+ return json.dumps(
1414
+ self,
1415
+ cls=type(
1416
+ "_FilesystemJSONEncoder",
1417
+ (FilesystemJSONEncoder,),
1418
+ {"include_password": include_password},
1419
+ ),
1420
+ )
1421
+
1422
+ @staticmethod
1423
+ def from_json(blob: str) -> AbstractFileSystem:
1424
+ """
1425
+ Recreate a filesystem instance from JSON representation.
1426
+
1427
+ See ``.to_json()`` for the expected structure of the input.
1428
+
1429
+ Parameters
1430
+ ----------
1431
+ blob: str
1432
+
1433
+ Returns
1434
+ -------
1435
+ file system instance, not necessarily of this particular class.
1436
+
1437
+ Warnings
1438
+ --------
1439
+ This can import arbitrary modules (as determined by the ``cls`` key).
1440
+ Make sure you haven't installed any modules that may execute malicious code
1441
+ at import time.
1442
+ """
1443
+ from .json import FilesystemJSONDecoder
1444
+
1445
+ return json.loads(blob, cls=FilesystemJSONDecoder)
1446
+
1447
+ def to_dict(self, *, include_password: bool = True) -> Dict[str, Any]:
1448
+ """
1449
+ JSON-serializable dictionary representation of this filesystem instance.
1450
+
1451
+ Parameters
1452
+ ----------
1453
+ include_password: bool, default True
1454
+ Whether to include the password (if any) in the output.
1455
+
1456
+ Returns
1457
+ -------
1458
+ Dictionary with keys ``cls`` (the python location of this class),
1459
+ protocol (text name of this class's protocol, first one in case of
1460
+ multiple), ``args`` (positional args, usually empty), and all other
1461
+ keyword arguments as their own keys.
1462
+
1463
+ Warnings
1464
+ --------
1465
+ Serialized filesystems may contain sensitive information which have been
1466
+ passed to the constructor, such as passwords and tokens. Make sure you
1467
+ store and send them in a secure environment!
1468
+ """
1469
+ from .json import FilesystemJSONEncoder
1470
+
1471
+ json_encoder = FilesystemJSONEncoder()
1472
+
1473
+ cls = type(self)
1474
+ proto = self.protocol
1475
+
1476
+ storage_options = dict(self.storage_options)
1477
+ if not include_password:
1478
+ storage_options.pop("password", None)
1479
+
1480
+ return dict(
1481
+ cls=f"{cls.__module__}:{cls.__name__}",
1482
+ protocol=proto[0] if isinstance(proto, (tuple, list)) else proto,
1483
+ args=json_encoder.make_serializable(self.storage_args),
1484
+ **json_encoder.make_serializable(storage_options),
1485
+ )
1486
+
1487
+ @staticmethod
1488
+ def from_dict(dct: Dict[str, Any]) -> AbstractFileSystem:
1489
+ """
1490
+ Recreate a filesystem instance from dictionary representation.
1491
+
1492
+ See ``.to_dict()`` for the expected structure of the input.
1493
+
1494
+ Parameters
1495
+ ----------
1496
+ dct: Dict[str, Any]
1497
+
1498
+ Returns
1499
+ -------
1500
+ file system instance, not necessarily of this particular class.
1501
+
1502
+ Warnings
1503
+ --------
1504
+ This can import arbitrary modules (as determined by the ``cls`` key).
1505
+ Make sure you haven't installed any modules that may execute malicious code
1506
+ at import time.
1507
+ """
1508
+ from .json import FilesystemJSONDecoder
1509
+
1510
+ json_decoder = FilesystemJSONDecoder()
1511
+
1512
+ dct = dict(dct) # Defensive copy
1513
+
1514
+ cls = FilesystemJSONDecoder.try_resolve_fs_cls(dct)
1515
+ if cls is None:
1516
+ raise ValueError("Not a serialized AbstractFileSystem")
1517
+
1518
+ dct.pop("cls", None)
1519
+ dct.pop("protocol", None)
1520
+
1521
+ return cls(
1522
+ *json_decoder.unmake_serializable(dct.pop("args", ())),
1523
+ **json_decoder.unmake_serializable(dct),
1524
+ )
1525
+
1526
+ def _get_pyarrow_filesystem(self):
1527
+ """
1528
+ Make a version of the FS instance which will be acceptable to pyarrow
1529
+ """
1530
+ # all instances already also derive from pyarrow
1531
+ return self
1532
+
1533
+ def get_mapper(self, root="", check=False, create=False, missing_exceptions=None):
1534
+ """Create key/value store based on this file-system
1535
+
1536
+ Makes a MutableMapping interface to the FS at the given root path.
1537
+ See ``fsspec.mapping.FSMap`` for further details.
1538
+ """
1539
+ from .mapping import FSMap
1540
+
1541
+ return FSMap(
1542
+ root,
1543
+ self,
1544
+ check=check,
1545
+ create=create,
1546
+ missing_exceptions=missing_exceptions,
1547
+ )
1548
+
1549
+ @classmethod
1550
+ def clear_instance_cache(cls):
1551
+ """
1552
+ Clear the cache of filesystem instances.
1553
+
1554
+ Notes
1555
+ -----
1556
+ Unless overridden by setting the ``cachable`` class attribute to False,
1557
+ the filesystem class stores a reference to newly created instances. This
1558
+ prevents Python's normal rules around garbage collection from working,
1559
+ since the instances refcount will not drop to zero until
1560
+ ``clear_instance_cache`` is called.
1561
+ """
1562
+ cls._cache.clear()
1563
+
1564
+ def created(self, path):
1565
+ """Return the created timestamp of a file as a datetime.datetime"""
1566
+ raise NotImplementedError
1567
+
1568
+ def modified(self, path):
1569
+ """Return the modified timestamp of a file as a datetime.datetime"""
1570
+ raise NotImplementedError
1571
+
1572
+ # ------------------------------------------------------------------------
1573
+ # Aliases
1574
+
1575
+ def read_bytes(self, path, start=None, end=None, **kwargs):
1576
+ """Alias of `AbstractFileSystem.cat_file`."""
1577
+ return self.cat_file(path, start=start, end=end, **kwargs)
1578
+
1579
+ def write_bytes(self, path, value, **kwargs):
1580
+ """Alias of `AbstractFileSystem.pipe_file`."""
1581
+ self.pipe_file(path, value, **kwargs)
1582
+
1583
+ def makedir(self, path, create_parents=True, **kwargs):
1584
+ """Alias of `AbstractFileSystem.mkdir`."""
1585
+ return self.mkdir(path, create_parents=create_parents, **kwargs)
1586
+
1587
+ def mkdirs(self, path, exist_ok=False):
1588
+ """Alias of `AbstractFileSystem.makedirs`."""
1589
+ return self.makedirs(path, exist_ok=exist_ok)
1590
+
1591
+ def listdir(self, path, detail=True, **kwargs):
1592
+ """Alias of `AbstractFileSystem.ls`."""
1593
+ return self.ls(path, detail=detail, **kwargs)
1594
+
1595
+ def cp(self, path1, path2, **kwargs):
1596
+ """Alias of `AbstractFileSystem.copy`."""
1597
+ return self.copy(path1, path2, **kwargs)
1598
+
1599
+ def move(self, path1, path2, **kwargs):
1600
+ """Alias of `AbstractFileSystem.mv`."""
1601
+ return self.mv(path1, path2, **kwargs)
1602
+
1603
+ def stat(self, path, **kwargs):
1604
+ """Alias of `AbstractFileSystem.info`."""
1605
+ return self.info(path, **kwargs)
1606
+
1607
+ def disk_usage(self, path, total=True, maxdepth=None, **kwargs):
1608
+ """Alias of `AbstractFileSystem.du`."""
1609
+ return self.du(path, total=total, maxdepth=maxdepth, **kwargs)
1610
+
1611
+ def rename(self, path1, path2, **kwargs):
1612
+ """Alias of `AbstractFileSystem.mv`."""
1613
+ return self.mv(path1, path2, **kwargs)
1614
+
1615
+ def delete(self, path, recursive=False, maxdepth=None):
1616
+ """Alias of `AbstractFileSystem.rm`."""
1617
+ return self.rm(path, recursive=recursive, maxdepth=maxdepth)
1618
+
1619
+ def upload(self, lpath, rpath, recursive=False, **kwargs):
1620
+ """Alias of `AbstractFileSystem.put`."""
1621
+ return self.put(lpath, rpath, recursive=recursive, **kwargs)
1622
+
1623
+ def download(self, rpath, lpath, recursive=False, **kwargs):
1624
+ """Alias of `AbstractFileSystem.get`."""
1625
+ return self.get(rpath, lpath, recursive=recursive, **kwargs)
1626
+
1627
+ def sign(self, path, expiration=100, **kwargs):
1628
+ """Create a signed URL representing the given path
1629
+
1630
+ Some implementations allow temporary URLs to be generated, as a
1631
+ way of delegating credentials.
1632
+
1633
+ Parameters
1634
+ ----------
1635
+ path : str
1636
+ The path on the filesystem
1637
+ expiration : int
1638
+ Number of seconds to enable the URL for (if supported)
1639
+
1640
+ Returns
1641
+ -------
1642
+ URL : str
1643
+ The signed URL
1644
+
1645
+ Raises
1646
+ ------
1647
+ NotImplementedError : if method is not implemented for a filesystem
1648
+ """
1649
+ raise NotImplementedError("Sign is not implemented for this filesystem")
1650
+
1651
+ def _isfilestore(self):
1652
+ # Originally inherited from pyarrow DaskFileSystem. Keeping this
1653
+ # here for backwards compatibility as long as pyarrow uses its
1654
+ # legacy fsspec-compatible filesystems and thus accepts fsspec
1655
+ # filesystems as well
1656
+ return False
1657
+
1658
+
1659
class AbstractBufferedFile(io.IOBase):
    """Convenience base class supplying buffered file behaviour.

    Backends whose native API is not file-like can subclass this; only
    ``_upload_chunk``, ``_initiate_upload`` and ``_fetch_range`` need to
    be provided.
    """

    DEFAULT_BLOCK_SIZE = 5 * 2**20
    _details = None

    def __init__(
        self,
        fs,
        path,
        mode="rb",
        block_size="default",
        autocommit=True,
        cache_type="readahead",
        cache_options=None,
        size=None,
        **kwargs,
    ):
        """
        Template for files with buffered reading and writing

        Parameters
        ----------
        fs: instance of FileSystem
        path: str
            location in file-system
        mode: str
            Normal file modes. Currently only 'wb', 'ab' or 'rb'. Some file
            systems may be read-only, and some may not support append.
        block_size: int
            Buffer size for reading or writing, 'default' for class default
        autocommit: bool
            Whether to write to final destination; may only impact what
            happens when file is being closed.
        cache_type: {"readahead", "none", "mmap", "bytes"}, default "readahead"
            Caching policy in read mode. See the definitions in ``core``.
        cache_options : dict
            Additional options passed to the constructor for the cache specified
            by `cache_type`.
        size: int
            If given and in read mode, suppressed having to look up the file size
        kwargs:
            Gets stored as self.kwargs
        """
        from .core import caches

        self.path = path
        self.fs = fs
        self.mode = mode
        if block_size in ["default", None]:
            self.blocksize = self.DEFAULT_BLOCK_SIZE
        else:
            self.blocksize = block_size
        self.loc = 0
        self.autocommit = autocommit
        self.end = None
        self.start = None
        self.closed = False

        if cache_options is None:
            cache_options = {}

        if "trim" in kwargs:
            warnings.warn(
                "Passing 'trim' to control the cache behavior has been deprecated. "
                "Specify it within the 'cache_options' argument instead.",
                FutureWarning,
            )
            cache_options["trim"] = kwargs.pop("trim")

        self.kwargs = kwargs

        if mode not in {"ab", "rb", "wb"}:
            raise NotImplementedError("File mode not supported")
        if mode == "rb":
            # Reading: determine size (or trust the caller) and set up a cache.
            self.size = size if size is not None else self.details["size"]
            self.cache = caches[cache_type](
                self.blocksize, self._fetch_range, self.size, **cache_options
            )
        else:
            # Writing/appending: accumulate into an in-memory buffer.
            self.buffer = io.BytesIO()
            self.offset = None
            self.forced = False
            self.location = None

    @property
    def details(self):
        # Lazily fetched info() result for this path.
        if self._details is None:
            self._details = self.fs.info(self.path)
        return self._details

    @details.setter
    def details(self, value):
        self._details = value
        self.size = value["size"]

    @property
    def full_name(self):
        return _unstrip_protocol(self.path, self.fs)

    @property
    def closed(self):
        # IOBase makes this read-only; shadow it with a settable attribute.
        # getattr, because this may run during interpreter teardown / __del__.
        return getattr(self, "_closed", True)

    @closed.setter
    def closed(self, c):
        self._closed = c

    def __hash__(self):
        # Writable files are only identical to themselves; readable files
        # hash on their metadata.
        if "w" in self.mode:
            return id(self)
        return int(tokenize(self.details), 16)

    def __eq__(self, other):
        """Files are equal if they have the same checksum, only in read mode"""
        if self is other:
            return True
        return (
            isinstance(other, type(self))
            and self.mode == "rb"
            and other.mode == "rb"
            and hash(self) == hash(other)
        )

    def commit(self):
        """Move from temp to final destination"""

    def discard(self):
        """Throw away temporary file"""

    def info(self):
        """File information about this path"""
        if "r" not in self.mode:
            raise ValueError("Info not available while writing")
        return self.details

    def tell(self):
        """Current file location"""
        return self.loc

    def seek(self, loc, whence=0):
        """Set current file location

        Parameters
        ----------
        loc: int
            byte location
        whence: {0, 1, 2}
            from start of file, current location or end of file, resp.
        """
        loc = int(loc)
        if self.mode != "rb":
            raise OSError(ESPIPE, "Seek only available in read mode")
        if whence == 0:
            pos = loc
        elif whence == 1:
            pos = self.loc + loc
        elif whence == 2:
            pos = self.size + loc
        else:
            raise ValueError(f"invalid whence ({whence}, should be 0, 1 or 2)")
        if pos < 0:
            raise ValueError("Seek before start of file")
        self.loc = pos
        return self.loc

    def write(self, data):
        """
        Write data to buffer.

        Buffer only sent on flush() or if buffer is greater than
        or equal to blocksize.

        Parameters
        ----------
        data: bytes
            Set of bytes to be written.
        """
        if self.mode not in {"wb", "ab"}:
            raise ValueError("File not in write mode")
        if self.closed:
            raise ValueError("I/O operation on closed file.")
        if self.forced:
            raise ValueError("This file has been force-flushed, can only close")
        written = self.buffer.write(data)
        self.loc += written
        if self.buffer.tell() >= self.blocksize:
            self.flush()
        return written

    def flush(self, force=False):
        """
        Write buffered data to backend store.

        Writes the current buffer, if it is larger than the block-size, or if
        the file is being closed.

        Parameters
        ----------
        force: bool
            When closing, write the last block even if it is smaller than
            blocks are allowed to be. Disallows further writing to this file.
        """

        if self.closed:
            raise ValueError("Flush on closed file")
        if force and self.forced:
            raise ValueError("Force flush cannot be called more than once")
        if force:
            self.forced = True

        if self.mode not in {"wb", "ab"}:
            # flush is a no-op for read-mode files
            return

        if not force and self.buffer.tell() < self.blocksize:
            # not enough data yet; keep buffering
            return

        if self.offset is None:
            # First flush: start the (possibly multipart) upload.
            self.offset = 0
            try:
                self._initiate_upload()
            except:  # noqa: E722
                self.closed = True
                raise

        if self._upload_chunk(final=force) is not False:
            self.offset += self.buffer.seek(0, 2)
            self.buffer = io.BytesIO()

    def _upload_chunk(self, final=False):
        """Write one part of a multi-block file upload

        Parameters
        ==========
        final: bool
            This is the last block, so should complete file, if
            self.autocommit is True.
        """
        # may not yet have been initialized, may need to call _initialize_upload

    def _initiate_upload(self):
        """Create remote file/upload"""
        pass

    def _fetch_range(self, start, end):
        """Get the specified set of bytes from remote"""
        raise NotImplementedError

    def read(self, length=-1):
        """
        Return data from cache, or fetch pieces as necessary

        Parameters
        ----------
        length: int (-1)
            Number of bytes to read; if <0, all remaining bytes.
        """
        length = -1 if length is None else int(length)
        if self.mode != "rb":
            raise ValueError("File not in read mode")
        if length < 0:
            length = self.size - self.loc
        if self.closed:
            raise ValueError("I/O operation on closed file.")
        if length == 0:
            # zero-length read: skip the cache entirely
            return b""
        out = self.cache._fetch(self.loc, self.loc + length)

        logger.debug(
            "%s read: %i - %i %s",
            self,
            self.loc,
            self.loc + length,
            self.cache._log_stats(),
        )
        self.loc += len(out)
        return out

    def readinto(self, b):
        """mirrors builtin file's readinto method

        https://docs.python.org/3/library/io.html#io.RawIOBase.readinto
        """
        view = memoryview(b).cast("B")
        data = self.read(view.nbytes)
        view[: len(data)] = data
        return len(data)

    def readuntil(self, char=b"\n", blocks=None):
        """Return data between current position and first occurrence of char

        char is included in the output, except if the end of the tile is
        encountered first.

        Parameters
        ----------
        char: bytes
            Thing to find
        blocks: None or int
            How much to read in each go. Defaults to file blocksize - which may
            mean a new read on every call.
        """
        parts = []
        while True:
            start = self.tell()
            chunk = self.read(blocks or self.blocksize)
            if len(chunk) == 0:
                break
            found = chunk.find(char)
            if found > -1:
                # include the delimiter, rewind to just after it
                parts.append(chunk[: found + len(char)])
                self.seek(start + found + len(char))
                break
            parts.append(chunk)
        return b"".join(parts)

    def readline(self):
        """Read until first occurrence of newline character

        Note that, because of character encoding, this is not necessarily a
        true line ending.
        """
        return self.readuntil(b"\n")

    def __next__(self):
        line = self.readline()
        if line:
            return line
        raise StopIteration

    def __iter__(self):
        return self

    def readlines(self):
        """Return all data, split by the newline character"""
        data = self.read()
        lines = data.split(b"\n")
        out = [line + b"\n" for line in lines[:-1]]
        if data.endswith(b"\n"):
            return out
        return out + [lines[-1]]
        # return list(self) ???

    def readinto1(self, b):
        return self.readinto(b)

    def close(self):
        """Close file

        Finalizes writes, discards cache
        """
        if getattr(self, "_unclosable", False):
            return
        if self.closed:
            return
        if self.mode == "rb":
            self.cache = None
        else:
            if not self.forced:
                self.flush(force=True)

            if self.fs is not None:
                # written data makes any cached listing/info stale
                self.fs.invalidate_cache(self.path)
                self.fs.invalidate_cache(self.fs._parent(self.path))

        self.closed = True

    def readable(self):
        """Whether opened for reading"""
        return self.mode == "rb" and not self.closed

    def seekable(self):
        """Whether is seekable (only in read mode)"""
        return self.readable()

    def writable(self):
        """Whether opened for writing"""
        return self.mode in {"wb", "ab"} and not self.closed

    def __del__(self):
        if not self.closed:
            self.close()

    def __str__(self):
        return f"<File-like object {type(self.fs).__name__}, {self.path}>"

    __repr__ = __str__

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()
parrot/lib/python3.10/site-packages/fsspec/transaction.py ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from collections import deque
2
+
3
+
4
class Transaction:
    """Filesystem transaction write context

    Gathers files for deferred commit or discard, so that several write
    operations can be finalized semi-atomically. This works by having this
    instance as the ``.transaction`` attribute of the given filesystem
    """

    def __init__(self, fs, **kwargs):
        """
        Parameters
        ----------
        fs: FileSystem instance
        """
        self.fs = fs
        self.files = deque()

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """End transaction and commit, if exit is not due to exception"""
        # only commit if there was no exception
        self.complete(commit=exc_type is None)
        if self.fs:
            self.fs._intrans = False
            self.fs._transaction = None
            self.fs = None

    def start(self):
        """Start a transaction on this FileSystem"""
        self.files = deque()  # clean up after previous failed completions
        self.fs._intrans = True

    def complete(self, commit=True):
        """Finish transaction: commit or discard all deferred files

        The filesystem is always taken out of transaction mode, even when a
        ``commit()``/``discard()`` call raises part-way through; otherwise
        later writes would silently be deferred into a dead transaction.
        """
        try:
            while self.files:
                f = self.files.popleft()
                if commit:
                    f.commit()
                else:
                    f.discard()
        finally:
            # Reset state unconditionally so the filesystem is usable again.
            if self.fs is not None:
                self.fs._intrans = False
                self.fs._transaction = None
            self.fs = None
50
+
51
+
52
class FileActor:
    """Accumulates not-yet-finalized files so they can be committed or
    discarded as one batch (used as a distributed actor by DaskTransaction).
    """

    def __init__(self):
        self.files = []

    def _finalize(self, action):
        # Apply ``action`` ("commit" or "discard") to every pending file,
        # then empty the pending list.
        for pending in self.files:
            getattr(pending, action)()
        self.files.clear()

    def commit(self):
        """Commit every pending file and forget them."""
        self._finalize("commit")

    def discard(self):
        """Discard every pending file and forget them."""
        self._finalize("discard")

    def append(self, f):
        """Register one more file for later finalization."""
        self.files.append(f)
68
+
69
+
70
class DaskTransaction(Transaction):
    # Transaction variant for dask-distributed clusters: the pending-file
    # list lives in a FileActor on the cluster, so every worker appends to
    # the same (remote) collection.

    def __init__(self, fs):
        """
        Parameters
        ----------
        fs: FileSystem instance
        """
        import distributed

        super().__init__(fs)
        # Replace the local deque with a cluster-side actor.
        client = distributed.default_client()
        self.files = client.submit(FileActor, actor=True).result()

    def complete(self, commit=True):
        """Finish transaction: commit or discard all deferred files"""
        # Actor method calls return futures; block until finalization is done.
        if commit:
            self.files.commit().result()
        else:
            self.files.discard().result()
        self.fs._intrans = False
        self.fs = None
parrot/lib/python3.10/site-packages/fsspec/utils.py ADDED
@@ -0,0 +1,740 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import contextlib
4
+ import logging
5
+ import math
6
+ import os
7
+ import pathlib
8
+ import re
9
+ import sys
10
+ import tempfile
11
+ from functools import partial
12
+ from hashlib import md5
13
+ from importlib.metadata import version
14
+ from typing import (
15
+ IO,
16
+ TYPE_CHECKING,
17
+ Any,
18
+ Callable,
19
+ Iterable,
20
+ Iterator,
21
+ Sequence,
22
+ TypeVar,
23
+ )
24
+ from urllib.parse import urlsplit
25
+
26
+ if TYPE_CHECKING:
27
+ from typing_extensions import TypeGuard
28
+
29
+ from fsspec.spec import AbstractFileSystem
30
+
31
+
32
# Default buffer/block size used by fsspec readers and writers: 5 MiB.
DEFAULT_BLOCK_SIZE = 5 * 2**20

# Generic type variable shared by the typed helpers in this module.
T = TypeVar("T")
35
+
36
+
37
def infer_storage_options(
    urlpath: str, inherit_storage_options: dict[str, Any] | None = None
) -> dict[str, Any]:
    """Infer storage options from URL path and merge it with existing storage
    options.

    Parameters
    ----------
    urlpath: str or unicode
        Either local absolute file path or URL (hdfs://namenode:8020/file.csv)
    inherit_storage_options: dict (optional)
        Its contents will get merged with the inferred information from the
        given path

    Returns
    -------
    Storage options dict.

    Examples
    --------
    >>> infer_storage_options('/mnt/datasets/test.csv')  # doctest: +SKIP
    {"protocol": "file", "path": "/mnt/datasets/test.csv"}
    >>> infer_storage_options(
    ...     'hdfs://username:pwd@node:123/mnt/datasets/test.csv?q=1',
    ...     inherit_storage_options={'extra': 'value'},
    ... )  # doctest: +SKIP
    {"protocol": "hdfs", "username": "username", "password": "pwd",
    "host": "node", "port": 123, "path": "/mnt/datasets/test.csv",
    "url_query": "q=1", "extra": "value"}
    """
    # Handle Windows paths including disk name in this special case; anything
    # without a scheme prefix is likewise treated as a plain local path.
    if (
        re.match(r"^[a-zA-Z]:[\\/]", urlpath)
        or re.match(r"^[a-zA-Z0-9]+://", urlpath) is None
    ):
        return {"protocol": "file", "path": urlpath}

    parsed_path = urlsplit(urlpath)
    protocol = parsed_path.scheme or "file"
    # Re-attach any fragment: for filesystem purposes it is part of the path.
    if parsed_path.fragment:
        path = "#".join([parsed_path.path, parsed_path.fragment])
    else:
        path = parsed_path.path
    if protocol == "file":
        # Special case parsing file protocol URL on Windows according to:
        # https://msdn.microsoft.com/en-us/library/jj710207.aspx
        windows_path = re.match(r"^/([a-zA-Z])[:|]([\\/].*)$", path)
        if windows_path:
            path = "%s:%s" % windows_path.groups()

    if protocol in ["http", "https"]:
        # for HTTP, we don't want to parse, as requests will anyway
        return {"protocol": protocol, "path": urlpath}

    options: dict[str, Any] = {"protocol": protocol, "path": path}

    if parsed_path.netloc:
        # Parse `hostname` from netloc manually because `parsed_path.hostname`
        # lowercases the hostname which is not always desirable (e.g. in S3):
        # https://github.com/dask/dask/issues/1417
        options["host"] = parsed_path.netloc.rsplit("@", 1)[-1].rsplit(":", 1)[0]

        # For bucket-style protocols the "host" is really the bucket name and
        # belongs at the front of the path.
        # (Removed a dead no-op `options["host"] = options["host"]` branch.)
        if protocol in ("s3", "s3a", "gcs", "gs"):
            options["path"] = options["host"] + options["path"]
        if parsed_path.port:
            options["port"] = parsed_path.port
        if parsed_path.username:
            options["username"] = parsed_path.username
        if parsed_path.password:
            options["password"] = parsed_path.password

    if parsed_path.query:
        options["url_query"] = parsed_path.query
    if parsed_path.fragment:
        options["url_fragment"] = parsed_path.fragment

    if inherit_storage_options:
        update_storage_options(options, inherit_storage_options)

    return options
119
+
120
+
121
def update_storage_options(
    options: dict[str, Any], inherited: dict[str, Any] | None = None
) -> None:
    """Merge ``inherited`` into ``options`` in place.

    A key present in both mappings with two different values is an error:
    inferred and explicitly-specified storage options must agree.
    """
    inherited = inherited or {}
    for key in set(options) & set(inherited):
        if options.get(key) != inherited.get(key):
            raise KeyError(
                f"Collision between inferred and specified storage "
                f"option:\n{key}"
            )
    options.update(inherited)
135
+
136
+
137
# Compression extensions registered via fsspec.compression.register_compression
compressions: dict[str, str] = {}


def infer_compression(filename: str) -> str | None:
    """Infer compression, if available, from filename.

    Infer a named compression type, if registered and available, from filename
    extension. This includes builtin (gz, bz2, zip) compressions, as well as
    optional compressions. See fsspec.compression.register_compression.
    Returns ``None`` for unregistered extensions.
    """
    suffix = os.path.splitext(filename)[-1].strip(".").lower()
    return compressions.get(suffix)
152
+
153
+
154
def build_name_function(max_int: float) -> Callable[[int], str]:
    """Returns a function that receives a single integer
    and returns it as a string padded by enough zero characters
    to align with maximum possible integer

    >>> name_f = build_name_function(57)

    >>> name_f(7)
    '07'
    >>> name_f(31)
    '31'
    >>> build_name_function(1000)(42)
    '0042'
    >>> build_name_function(999)(42)
    '042'
    >>> build_name_function(0)(0)
    '0'
    """
    # Nudge above exact powers of ten (and max_int == 0) so the pad width
    # comes out as expected, e.g. 1000 -> 4 digits, 999 -> 3 digits.
    width = int(math.ceil(math.log10(max_int + 1e-8)))

    def pad(i: int) -> str:
        return str(i).zfill(width)

    return pad
181
+
182
+
183
def seek_delimiter(file: IO[bytes], delimiter: bytes, blocksize: int) -> bool:
    r"""Seek current file to file start, file end, or byte after delimiter seq.

    Seeks file to next chunk delimiter, where chunks are defined on file start,
    a delimiting sequence, and file end. Use file.tell() to see location afterwards.
    Note that file start is a valid split, so must be at offset > 0 to seek for
    delimiter.

    Parameters
    ----------
    file: a file
    delimiter: bytes
        a delimiter like ``b'\n'`` or message sentinel, matching file .read() type
    blocksize: int
        Number of bytes to read from the file at once.


    Returns
    -------
    Returns True if a delimiter was found, False if at file start or end.

    """

    if file.tell() == 0:
        # beginning-of-file, return without seek
        return False

    # Interface is for binary IO, with delimiter as bytes, but initialize last
    # with result of file.read to preserve compatibility with text IO.
    last: bytes | None = None
    while True:
        current = file.read(blocksize)
        if not current:
            # end-of-file without delimiter
            return False
        # Prepend the tail of the previous block so a delimiter straddling
        # two reads is still found.
        full = last + current if last else current
        try:
            if delimiter in full:
                i = full.index(delimiter)
                # Rewind so the position lands just past the delimiter.
                file.seek(file.tell() - (len(full) - i) + len(delimiter))
                return True
            elif len(current) < blocksize:
                # end-of-file without delimiter
                return False
        except (OSError, ValueError):
            # NOTE(review): presumably tolerates unusual file objects where
            # the containment test or seek can fail — confirm intended cases.
            pass
        # Keep only enough bytes to detect a split delimiter next iteration.
        last = full[-len(delimiter) :]
230
+
231
+
232
def read_block(
    f: IO[bytes],
    offset: int,
    length: int | None,
    delimiter: bytes | None = None,
    split_before: bool = False,
) -> bytes:
    """Read a block of bytes from a file

    Parameters
    ----------
    f: File
        Open file
    offset: int
        Byte offset to start read
    length: int
        Number of bytes to read, read through end of file if None
    delimiter: bytes (optional)
        Ensure reading starts and stops at delimiter bytestring
    split_before: bool (optional)
        Start/stop read *before* delimiter bytestring.


    If using the ``delimiter=`` keyword argument we ensure that the read
    starts and stops at delimiter boundaries that follow the locations
    ``offset`` and ``offset + length``. If ``offset`` is zero then we
    start at zero, regardless of delimiter. The bytestring returned WILL
    include the terminating delimiter string.

    Examples
    --------

    >>> from io import BytesIO  # doctest: +SKIP
    >>> f = BytesIO(b'Alice, 100\\nBob, 200\\nCharlie, 300')  # doctest: +SKIP
    >>> read_block(f, 0, 13)  # doctest: +SKIP
    b'Alice, 100\\nBo'

    >>> read_block(f, 0, 13, delimiter=b'\\n')  # doctest: +SKIP
    b'Alice, 100\\nBob, 200\\n'

    >>> read_block(f, 10, 10, delimiter=b'\\n')  # doctest: +SKIP
    b'Bob, 200\\nCharlie, 300'
    """
    if delimiter:
        # Snap the start of the read forward to the next delimiter boundary
        # (no-op when offset is 0; seek_delimiter returns False there).
        f.seek(offset)
        found_start_delim = seek_delimiter(f, delimiter, 2**16)
        if length is None:
            return f.read()
        start = f.tell()
        # Shrink the requested length by however far the start was advanced.
        length -= start - offset

        # Snap the end of the read forward to the following delimiter.
        f.seek(start + length)
        found_end_delim = seek_delimiter(f, delimiter, 2**16)
        end = f.tell()

        # Adjust split location to before delimiter if seek found the
        # delimiter sequence, not start or end of file.
        if found_start_delim and split_before:
            start -= len(delimiter)

        if found_end_delim and split_before:
            end -= len(delimiter)

        offset = start
        length = end - start

    f.seek(offset)

    # TODO: allow length to be None and read to the end of the file?
    assert length is not None
    b = f.read(length)
    return b
304
+
305
+
306
def tokenize(*args: Any, **kwargs: Any) -> str:
    """Deterministic token

    (modified from dask.base)

    >>> tokenize([1, 2, '3'])
    '9d71491b50023b06fc76928e6eddb952'

    >>> tokenize('Hello') == tokenize('Hello')
    True
    """
    # Fold keyword arguments into the positional tuple so they participate
    # in the hash.
    if kwargs:
        args = args + (kwargs,)
    payload = str(args).encode()
    try:
        hasher = md5(payload)
    except ValueError:
        # FIPS systems: https://github.com/fsspec/filesystem_spec/issues/380
        hasher = md5(payload, usedforsecurity=False)
    return hasher.hexdigest()
325
+
326
+
327
def stringify_path(filepath: str | os.PathLike[str] | pathlib.Path) -> str:
    """Attempt to convert a path-like object to a string.

    Parameters
    ----------
    filepath: object to be converted

    Returns
    -------
    filepath_str: maybe a string version of the object

    Notes
    -----
    Strings pass through untouched.  Objects supporting the fspath protocol
    are coerced via ``__fspath__``; objects exposing a ``.path`` attribute
    (some fsspec file objects) use that.  Anything else — bytes, buffers,
    non-path-likes — is returned unchanged.
    """
    if isinstance(filepath, str):
        return filepath
    fspath = getattr(filepath, "__fspath__", None)
    if fspath is not None:
        return fspath()
    if hasattr(filepath, "path"):
        return filepath.path
    return filepath  # type: ignore[return-value]
357
+
358
+
359
def make_instance(
    cls: Callable[..., T], args: Sequence[Any], kwargs: dict[str, Any]
) -> T:
    """Rebuild an instance from ``(cls, args, kwargs)``.

    NOTE(review): assumes the created instance exposes ``_determine_worker``
    (fsspec filesystem classes do) — confirm before using with other types.
    """
    inst = cls(*args, **kwargs)
    inst._determine_worker()  # type: ignore[attr-defined]
    return inst
365
+
366
+
367
def common_prefix(paths: Iterable[str]) -> str:
    """For a list of paths, find the shortest prefix common to all"""
    split = [p.split("/") for p in paths]
    # Only the first min-length segments can possibly be shared.
    depth = min(len(s) for s in split)
    matched = 0
    for level in range(depth):
        segment = split[0][level]
        if any(s[level] != segment for s in split):
            break
        matched += 1
    return "/".join(split[0][:matched])
378
+
379
+
380
def other_paths(
    paths: list[str],
    path2: str | list[str],
    exists: bool = False,
    flatten: bool = False,
) -> list[str]:
    """In bulk file operations, construct a new file tree from a list of files

    Parameters
    ----------
    paths: list of str
        The input file tree
    path2: str or list of str
        Root to construct the new list in. If this is already a list of str, we just
        assert it has the right number of elements.
    exists: bool (optional)
        For a str destination, it is already exists (and is a dir), files should
        end up inside.
    flatten: bool (optional)
        Whether to flatten the input directory tree structure so that the output files
        are in the same directory.

    Returns
    -------
    list of str
    """

    if isinstance(path2, str):
        path2 = path2.rstrip("/")

        if flatten:
            # Drop directory structure: basename of each source under path2.
            path2 = ["/".join((path2, p.split("/")[-1])) for p in paths]
        else:
            cp = common_prefix(paths)
            if exists:
                # Destination dir exists: keep the last shared component so
                # sources land inside it rather than replacing it.
                cp = cp.rsplit("/", 1)[0]
            if not cp and all(not s.startswith("/") for s in paths):
                # No shared prefix and all-relative sources: nest verbatim.
                path2 = ["/".join([path2, p]) for p in paths]
            else:
                # Swap the shared prefix for the destination root (first
                # occurrence only).
                path2 = [p.replace(cp, path2, 1) for p in paths]
    else:
        # Caller supplied explicit destinations; just check the count.
        assert len(paths) == len(path2)
    return path2
423
+
424
+
425
def is_exception(obj: Any) -> bool:
    """True if ``obj`` is an exception instance (of any kind)."""
    return isinstance(obj, BaseException)


def isfilelike(f: Any) -> TypeGuard[IO[bytes]]:
    """True if ``f`` quacks like a readable file (read/close/tell)."""
    return all(hasattr(f, attr) for attr in ("read", "close", "tell"))
434
+
435
+
436
def get_protocol(url: str) -> str:
    """Return the protocol prefix of ``url``, or ``"file"`` if it has none.

    Splits on the first ``://`` or chained-filesystem ``::`` separator.
    """
    url = stringify_path(url)
    pieces = re.split(r"(\:\:|\://)", url, maxsplit=1)
    if len(pieces) == 1:
        return "file"
    return pieces[0]
442
+
443
+
444
def can_be_local(path: str) -> bool:
    """Can the given URL be used with open_local?"""
    from fsspec import get_filesystem_class

    try:
        # A filesystem advertises open_local support via its `local_file`
        # class attribute; absence means False.
        return getattr(get_filesystem_class(get_protocol(path)), "local_file", False)
    except (ValueError, ImportError):
        # not in registry or import failed
        return False
453
+
454
+
455
def get_package_version_without_import(name: str) -> str | None:
    """For given package name, try to find the version without importing it

    Import and package.__version__ is still the backup here, so an import
    *might* happen.

    Returns either the version string, or None if the package
    or the version was not readily found.
    """
    # Already imported: trust the module's own __version__ when present.
    mod = sys.modules.get(name)
    if mod is not None and hasattr(mod, "__version__"):
        return mod.__version__
    # Installed distribution metadata; no import required.
    try:
        return version(name)
    except:  # noqa: E722
        pass
    # Last resort: actually import the package.
    try:
        import importlib

        return importlib.import_module(name).__version__
    except (ImportError, AttributeError):
        return None
479
+
480
+
481
def setup_logging(
    logger: logging.Logger | None = None,
    logger_name: str | None = None,
    level: str = "DEBUG",
    clear: bool = True,
) -> logging.Logger:
    """Attach a stream handler with fsspec's standard format to a logger.

    Parameters
    ----------
    logger: existing logger to configure (alternative to ``logger_name``)
    logger_name: name used to look up the logger when none is given
    level: logging level name to set on the logger
    clear: drop any previously-attached handlers first

    Raises
    ------
    ValueError
        If neither ``logger`` nor ``logger_name`` is supplied.
    """
    if logger is None and logger_name is None:
        raise ValueError("Provide either logger object or logger name")
    target = logger if logger is not None else logging.getLogger(logger_name)
    handler = logging.StreamHandler()
    handler.setFormatter(
        logging.Formatter(
            "%(asctime)s - %(name)s - %(levelname)s - %(funcName)s -- %(message)s"
        )
    )
    if clear:
        target.handlers.clear()
    target.addHandler(handler)
    target.setLevel(level)
    return target
500
+
501
+
502
def _unstrip_protocol(name: str, fs: AbstractFileSystem) -> str:
    # Module-level wrapper around fs.unstrip_protocol (re-attaches the
    # protocol prefix to a bare path).
    # NOTE(review): presumably exists so a picklable plain function can be
    # passed around instead of a bound method — confirm with callers.
    return fs.unstrip_protocol(name)
504
+
505
+
506
def mirror_from(
    origin_name: str, methods: Iterable[str]
) -> Callable[[type[T]], type[T]]:
    """Class decorator: expose attributes of ``self.<origin_name>``.

    Each name in ``methods`` becomes a read-only property on the decorated
    class that forwards to the same attribute of the instance's
    ``origin_name`` member.
    """

    def decorate(cls: type[T]) -> type[T]:
        for attr in methods:
            # Bind `attr` per-iteration via a default argument to avoid the
            # late-binding-closure pitfall.
            def forward(self: Any, _attr: str = attr) -> Any:
                return getattr(getattr(self, origin_name), _attr)

            setattr(cls, attr, property(forward))
        return cls

    return decorate
524
+
525
+
526
@contextlib.contextmanager
def nullcontext(obj: T) -> Iterator[T]:
    """Do-nothing context manager that yields ``obj`` unchanged."""
    yield obj
529
+
530
+
531
def merge_offset_ranges(
    paths: list[str],
    starts: list[int] | int,
    ends: list[int] | int,
    max_gap: int = 0,
    max_block: int | None = None,
    sort: bool = True,
) -> tuple[list[str], list[int], list[int]]:
    """Merge adjacent byte-offset ranges when the inter-range
    gap is <= `max_gap`, and when the merged byte range does not
    exceed `max_block` (if specified). By default, this function
    will re-order the input paths and byte ranges to ensure sorted
    order. If the user can guarantee that the inputs are already
    sorted, passing `sort=False` will skip the re-ordering.
    """
    # Check input
    if not isinstance(paths, list):
        raise TypeError
    # Scalar starts/ends are broadcast to every path.
    if not isinstance(starts, list):
        starts = [starts] * len(paths)
    if not isinstance(ends, list):
        ends = [ends] * len(paths)
    if len(starts) != len(paths) or len(ends) != len(paths):
        raise ValueError

    # Early Return
    if len(starts) <= 1:
        return paths, starts, ends

    # Treat None/falsy starts as 0 so the gap arithmetic below works.
    starts = [s or 0 for s in starts]
    # Sort by paths and then ranges if `sort=True`
    if sort:
        paths, starts, ends = (
            list(v)
            for v in zip(
                *sorted(
                    zip(paths, starts, ends),
                )
            )
        )

    if paths:
        # Loop through the coupled `paths`, `starts`, and
        # `ends`, and merge adjacent blocks when appropriate
        new_paths = paths[:1]
        new_starts = starts[:1]
        new_ends = ends[:1]
        for i in range(1, len(paths)):
            # An open-ended previous range (end None) on the same path
            # already covers this one.
            if paths[i] == paths[i - 1] and new_ends[-1] is None:
                continue
            elif (
                paths[i] != paths[i - 1]
                or ((starts[i] - new_ends[-1]) > max_gap)
                or (max_block is not None and (ends[i] - new_starts[-1]) > max_block)
            ):
                # Cannot merge with previous block.
                # Add new `paths`, `starts`, and `ends` elements
                new_paths.append(paths[i])
                new_starts.append(starts[i])
                new_ends.append(ends[i])
            else:
                # Merge with previous block by updating the
                # last element of `ends`
                new_ends[-1] = ends[i]
        return new_paths, new_starts, new_ends

    # `paths` is empty. Just return input lists
    return paths, starts, ends
599
+
600
+
601
def file_size(filelike: IO[bytes]) -> int:
    """Find length of any open read-mode file-like"""
    saved = filelike.tell()
    try:
        # seek-to-end returns the absolute end offset, i.e. the size.
        return filelike.seek(0, 2)
    finally:
        # Restore the caller's position no matter what.
        filelike.seek(saved)
608
+
609
+
610
@contextlib.contextmanager
def atomic_write(path: str, mode: str = "wb"):
    """
    A context manager that opens a temporary file next to `path` and, on exit,
    replaces `path` with the temporary file, thereby updating `path`
    atomically.
    """
    # Create the temp file in the same directory so os.replace stays on one
    # filesystem (rename is only atomic within a filesystem).
    fd, fn = tempfile.mkstemp(
        dir=os.path.dirname(path), prefix=os.path.basename(path) + "-"
    )
    try:
        with open(fd, mode) as fp:
            yield fp
    except BaseException:
        # On any failure, remove the partial temp file and re-raise.
        with contextlib.suppress(FileNotFoundError):
            os.unlink(fn)
        raise
    else:
        # Success: atomically swap the temp file into place.
        os.replace(fn, path)
629
+
630
+
631
def _translate(pat, STAR, QUESTION_MARK):
    # Copied from: https://github.com/python/cpython/pull/106703.
    # Translate a single glob component into a list of regex fragments;
    # STAR and QUESTION_MARK are the regex replacements for "*" and "?".
    res: list[str] = []
    add = res.append
    i, n = 0, len(pat)
    while i < n:
        c = pat[i]
        i = i + 1
        if c == "*":
            # compress consecutive `*` into one
            if (not res) or res[-1] is not STAR:
                add(STAR)
        elif c == "?":
            add(QUESTION_MARK)
        elif c == "[":
            # Character class: scan forward to the closing "]".
            j = i
            if j < n and pat[j] == "!":
                j = j + 1
            if j < n and pat[j] == "]":
                j = j + 1
            while j < n and pat[j] != "]":
                j = j + 1
            if j >= n:
                # Unterminated class: treat "[" literally.
                add("\\[")
            else:
                stuff = pat[i:j]
                if "-" not in stuff:
                    stuff = stuff.replace("\\", r"\\")
                else:
                    chunks = []
                    k = i + 2 if pat[i] == "!" else i + 1
                    while True:
                        k = pat.find("-", k, j)
                        if k < 0:
                            break
                        chunks.append(pat[i:k])
                        i = k + 1
                        k = k + 3
                    chunk = pat[i:j]
                    if chunk:
                        chunks.append(chunk)
                    else:
                        chunks[-1] += "-"
                    # Remove empty ranges -- invalid in RE.
                    for k in range(len(chunks) - 1, 0, -1):
                        if chunks[k - 1][-1] > chunks[k][0]:
                            chunks[k - 1] = chunks[k - 1][:-1] + chunks[k][1:]
                            del chunks[k]
                    # Escape backslashes and hyphens for set difference (--).
                    # Hyphens that create ranges shouldn't be escaped.
                    stuff = "-".join(
                        s.replace("\\", r"\\").replace("-", r"\-") for s in chunks
                    )
                # Escape set operations (&&, ~~ and ||).
                stuff = re.sub(r"([&~|])", r"\\\1", stuff)
                i = j + 1
                if not stuff:
                    # Empty range: never match.
                    add("(?!)")
                elif stuff == "!":
                    # Negated empty range: match any character.
                    add(".")
                else:
                    if stuff[0] == "!":
                        stuff = "^" + stuff[1:]
                    elif stuff[0] in ("^", "["):
                        stuff = "\\" + stuff
                    add(f"[{stuff}]")
        else:
            # Ordinary character: escape it for the regex.
            add(re.escape(c))
    assert i == n
    return res
703
+
704
+
705
def glob_translate(pat):
    # Copied from: https://github.com/python/cpython/pull/106703.
    # The keyword parameters' values are fixed to:
    # recursive=True, include_hidden=True, seps=None
    """Translate a pathname with shell wildcards to a regular expression."""
    if os.path.altsep:
        seps = os.path.sep + os.path.altsep
    else:
        seps = os.path.sep
    escaped_seps = "".join(map(re.escape, seps))
    any_sep = f"[{escaped_seps}]" if len(seps) > 1 else escaped_seps
    not_sep = f"[^{escaped_seps}]"
    # Regex building blocks: one (possibly final) path segment, or any run
    # of segments for "**".
    one_last_segment = f"{not_sep}+"
    one_segment = f"{one_last_segment}{any_sep}"
    any_segments = f"(?:.+{any_sep})?"
    any_last_segments = ".*"
    results = []
    parts = re.split(any_sep, pat)
    last_part_idx = len(parts) - 1
    for idx, part in enumerate(parts):
        if part == "*":
            results.append(one_segment if idx < last_part_idx else one_last_segment)
            continue
        if part == "**":
            results.append(any_segments if idx < last_part_idx else any_last_segments)
            continue
        elif "**" in part:
            raise ValueError(
                "Invalid pattern: '**' can only be an entire path component"
            )
        if part:
            # Translate in-segment wildcards; "*" must not cross a separator.
            results.extend(_translate(part, f"{not_sep}*", not_sep))
        if idx < last_part_idx:
            results.append(any_sep)
    res = "".join(results)
    # DOTALL so separators/newlines in names are handled; \Z anchors the end.
    return rf"(?s:{res})\Z"
parrot/lib/python3.10/site-packages/huggingface_hub/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (19.2 kB). View file
 
parrot/lib/python3.10/site-packages/huggingface_hub/__pycache__/_commit_api.cpython-310.pyc ADDED
Binary file (24.6 kB). View file
 
parrot/lib/python3.10/site-packages/huggingface_hub/__pycache__/_inference_endpoints.cpython-310.pyc ADDED
Binary file (15.6 kB). View file
 
parrot/lib/python3.10/site-packages/huggingface_hub/__pycache__/_login.cpython-310.pyc ADDED
Binary file (16.7 kB). View file
 
parrot/lib/python3.10/site-packages/huggingface_hub/__pycache__/_snapshot_download.cpython-310.pyc ADDED
Binary file (10.6 kB). View file
 
parrot/lib/python3.10/site-packages/huggingface_hub/__pycache__/_space_api.cpython-310.pyc ADDED
Binary file (5.4 kB). View file
 
parrot/lib/python3.10/site-packages/huggingface_hub/__pycache__/_upload_large_folder.cpython-310.pyc ADDED
Binary file (16.3 kB). View file
 
parrot/lib/python3.10/site-packages/huggingface_hub/__pycache__/_webhooks_payload.cpython-310.pyc ADDED
Binary file (3.93 kB). View file
 
parrot/lib/python3.10/site-packages/huggingface_hub/__pycache__/community.cpython-310.pyc ADDED
Binary file (13 kB). View file
 
parrot/lib/python3.10/site-packages/huggingface_hub/__pycache__/errors.cpython-310.pyc ADDED
Binary file (12 kB). View file
 
parrot/lib/python3.10/site-packages/huggingface_hub/__pycache__/fastai_utils.cpython-310.pyc ADDED
Binary file (14.3 kB). View file
 
parrot/lib/python3.10/site-packages/huggingface_hub/__pycache__/hub_mixin.cpython-310.pyc ADDED
Binary file (28.7 kB). View file
 
parrot/lib/python3.10/site-packages/huggingface_hub/__pycache__/lfs.cpython-310.pyc ADDED
Binary file (13.8 kB). View file
 
parrot/lib/python3.10/site-packages/huggingface_hub/__pycache__/repocard.cpython-310.pyc ADDED
Binary file (29.6 kB). View file
 
parrot/lib/python3.10/site-packages/huggingface_hub/__pycache__/repocard_data.cpython-310.pyc ADDED
Binary file (28.7 kB). View file
 
parrot/lib/python3.10/site-packages/huggingface_hub/__pycache__/repository.cpython-310.pyc ADDED
Binary file (45.6 kB). View file
 
parrot/lib/python3.10/site-packages/huggingface_hub/commands/__init__.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from abc import ABC, abstractmethod
16
+ from argparse import _SubParsersAction
17
+
18
+
19
class BaseHuggingfaceCLICommand(ABC):
    """Abstract interface for a huggingface CLI subcommand.

    Implementations attach their argparse subparser in ``register_subcommand``
    and perform the actual work in ``run``.
    """

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: _SubParsersAction):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()