tbrokowski committed on
Commit 5e9bca6 · verified · Parent: 13eb656

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. venv/lib/python3.13/site-packages/charset_normalizer-3.4.3.dist-info/INSTALLER +1 -0
  2. venv/lib/python3.13/site-packages/charset_normalizer-3.4.3.dist-info/METADATA +750 -0
  3. venv/lib/python3.13/site-packages/charset_normalizer-3.4.3.dist-info/RECORD +35 -0
  4. venv/lib/python3.13/site-packages/charset_normalizer-3.4.3.dist-info/WHEEL +6 -0
  5. venv/lib/python3.13/site-packages/charset_normalizer-3.4.3.dist-info/entry_points.txt +2 -0
  6. venv/lib/python3.13/site-packages/charset_normalizer-3.4.3.dist-info/top_level.txt +1 -0
  7. venv/lib/python3.13/site-packages/filelock-3.19.1.dist-info/INSTALLER +1 -0
  8. venv/lib/python3.13/site-packages/filelock-3.19.1.dist-info/METADATA +42 -0
  9. venv/lib/python3.13/site-packages/filelock-3.19.1.dist-info/RECORD +24 -0
  10. venv/lib/python3.13/site-packages/filelock-3.19.1.dist-info/WHEEL +4 -0
  11. venv/lib/python3.13/site-packages/fsspec/__init__.py +71 -0
  12. venv/lib/python3.13/site-packages/fsspec/_version.py +34 -0
  13. venv/lib/python3.13/site-packages/fsspec/archive.py +75 -0
  14. venv/lib/python3.13/site-packages/fsspec/asyn.py +1097 -0
  15. venv/lib/python3.13/site-packages/fsspec/caching.py +1004 -0
  16. venv/lib/python3.13/site-packages/fsspec/callbacks.py +324 -0
  17. venv/lib/python3.13/site-packages/fsspec/compression.py +182 -0
  18. venv/lib/python3.13/site-packages/fsspec/config.py +131 -0
  19. venv/lib/python3.13/site-packages/fsspec/conftest.py +55 -0
  20. venv/lib/python3.13/site-packages/fsspec/core.py +743 -0
  21. venv/lib/python3.13/site-packages/fsspec/dircache.py +98 -0
  22. venv/lib/python3.13/site-packages/fsspec/exceptions.py +18 -0
  23. venv/lib/python3.13/site-packages/fsspec/fuse.py +324 -0
  24. venv/lib/python3.13/site-packages/fsspec/generic.py +394 -0
  25. venv/lib/python3.13/site-packages/fsspec/gui.py +417 -0
  26. venv/lib/python3.13/site-packages/fsspec/json.py +117 -0
  27. venv/lib/python3.13/site-packages/fsspec/mapping.py +251 -0
  28. venv/lib/python3.13/site-packages/fsspec/parquet.py +541 -0
  29. venv/lib/python3.13/site-packages/fsspec/registry.py +330 -0
  30. venv/lib/python3.13/site-packages/fsspec/spec.py +2270 -0
  31. venv/lib/python3.13/site-packages/fsspec/transaction.py +90 -0
  32. venv/lib/python3.13/site-packages/fsspec/utils.py +737 -0
  33. venv/lib/python3.13/site-packages/hf_xet/__init__.py +5 -0
  34. venv/lib/python3.13/site-packages/huggingface_hub/__init__.py +1548 -0
  35. venv/lib/python3.13/site-packages/huggingface_hub/_commit_api.py +908 -0
  36. venv/lib/python3.13/site-packages/huggingface_hub/_commit_scheduler.py +353 -0
  37. venv/lib/python3.13/site-packages/huggingface_hub/_inference_endpoints.py +413 -0
  38. venv/lib/python3.13/site-packages/huggingface_hub/_jobs_api.py +301 -0
  39. venv/lib/python3.13/site-packages/huggingface_hub/_local_folder.py +447 -0
  40. venv/lib/python3.13/site-packages/huggingface_hub/_login.py +520 -0
  41. venv/lib/python3.13/site-packages/huggingface_hub/_oauth.py +460 -0
  42. venv/lib/python3.13/site-packages/huggingface_hub/_snapshot_download.py +343 -0
  43. venv/lib/python3.13/site-packages/huggingface_hub/_space_api.py +168 -0
  44. venv/lib/python3.13/site-packages/huggingface_hub/_tensorboard_logger.py +193 -0
  45. venv/lib/python3.13/site-packages/huggingface_hub/_upload_large_folder.py +755 -0
  46. venv/lib/python3.13/site-packages/huggingface_hub/_webhooks_payload.py +137 -0
  47. venv/lib/python3.13/site-packages/huggingface_hub/_webhooks_server.py +388 -0
  48. venv/lib/python3.13/site-packages/huggingface_hub/community.py +355 -0
  49. venv/lib/python3.13/site-packages/huggingface_hub/constants.py +294 -0
  50. venv/lib/python3.13/site-packages/huggingface_hub/dataclasses.py +484 -0
venv/lib/python3.13/site-packages/charset_normalizer-3.4.3.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
+ pip
venv/lib/python3.13/site-packages/charset_normalizer-3.4.3.dist-info/METADATA ADDED
@@ -0,0 +1,750 @@
+ Metadata-Version: 2.4
2
+ Name: charset-normalizer
3
+ Version: 3.4.3
4
+ Summary: The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet.
5
+ Author-email: "Ahmed R. TAHRI" <tahri.ahmed@proton.me>
6
+ Maintainer-email: "Ahmed R. TAHRI" <tahri.ahmed@proton.me>
7
+ License: MIT
8
+ Project-URL: Changelog, https://github.com/jawah/charset_normalizer/blob/master/CHANGELOG.md
9
+ Project-URL: Documentation, https://charset-normalizer.readthedocs.io/
10
+ Project-URL: Code, https://github.com/jawah/charset_normalizer
11
+ Project-URL: Issue tracker, https://github.com/jawah/charset_normalizer/issues
12
+ Keywords: encoding,charset,charset-detector,detector,normalization,unicode,chardet,detect
13
+ Classifier: Development Status :: 5 - Production/Stable
14
+ Classifier: Intended Audience :: Developers
15
+ Classifier: Operating System :: OS Independent
16
+ Classifier: Programming Language :: Python
17
+ Classifier: Programming Language :: Python :: 3
18
+ Classifier: Programming Language :: Python :: 3.7
19
+ Classifier: Programming Language :: Python :: 3.8
20
+ Classifier: Programming Language :: Python :: 3.9
21
+ Classifier: Programming Language :: Python :: 3.10
22
+ Classifier: Programming Language :: Python :: 3.11
23
+ Classifier: Programming Language :: Python :: 3.12
24
+ Classifier: Programming Language :: Python :: 3.13
25
+ Classifier: Programming Language :: Python :: 3.14
26
+ Classifier: Programming Language :: Python :: 3 :: Only
27
+ Classifier: Programming Language :: Python :: Implementation :: CPython
28
+ Classifier: Programming Language :: Python :: Implementation :: PyPy
29
+ Classifier: Topic :: Text Processing :: Linguistic
30
+ Classifier: Topic :: Utilities
31
+ Classifier: Typing :: Typed
32
+ Requires-Python: >=3.7
33
+ Description-Content-Type: text/markdown
34
+ License-File: LICENSE
35
+ Provides-Extra: unicode-backport
36
+ Dynamic: license-file
37
+
38
+ <h1 align="center">Charset Detection, for Everyone 👋</h1>
39
+
40
+ <p align="center">
41
+ <sup>The Real First Universal Charset Detector</sup><br>
42
+ <a href="https://pypi.org/project/charset-normalizer">
43
+ <img src="https://img.shields.io/pypi/pyversions/charset_normalizer.svg?orange=blue" />
44
+ </a>
45
+ <a href="https://pepy.tech/project/charset-normalizer/">
46
+ <img alt="Download Count Total" src="https://static.pepy.tech/badge/charset-normalizer/month" />
47
+ </a>
48
+ <a href="https://bestpractices.coreinfrastructure.org/projects/7297">
49
+ <img src="https://bestpractices.coreinfrastructure.org/projects/7297/badge">
50
+ </a>
51
+ </p>
52
+ <p align="center">
53
+ <sup><i>Featured Packages</i></sup><br>
54
+ <a href="https://github.com/jawah/niquests">
55
+ <img alt="Static Badge" src="https://img.shields.io/badge/Niquests-Most_Advanced_HTTP_Client-cyan">
56
+ </a>
57
+ <a href="https://github.com/jawah/wassima">
58
+ <img alt="Static Badge" src="https://img.shields.io/badge/Wassima-Certifi_Replacement-cyan">
59
+ </a>
60
+ </p>
61
+ <p align="center">
62
+ <sup><i>In other languages (unofficial ports, by the community)</i></sup><br>
63
+ <a href="https://github.com/nickspring/charset-normalizer-rs">
64
+ <img alt="Static Badge" src="https://img.shields.io/badge/Rust-red">
65
+ </a>
66
+ </p>
67
+
68
+ > A library that helps you read text from an unknown charset encoding.<br /> Motivated by `chardet`,
69
+ > I'm trying to resolve the issue by taking a new approach.
70
+ > All IANA character set names for which the Python core library provides codecs are supported.
71
+
72
+ <p align="center">
73
+ >>>>> <a href="https://charsetnormalizerweb.ousret.now.sh" target="_blank">👉 Try Me Online Now, Then Adopt Me 👈 </a> <<<<<
74
+ </p>
75
+
76
+ This project offers you an alternative to **Universal Charset Encoding Detector**, also known as **Chardet**.
77
+
78
+ | Feature | [Chardet](https://github.com/chardet/chardet) | Charset Normalizer | [cChardet](https://github.com/PyYoshi/cChardet) |
79
+ |--------------------------------------------------|:---------------------------------------------:|:--------------------------------------------------------------------------------------------------:|:-----------------------------------------------:|
80
+ | `Fast` | ❌ | ✅ | ✅ |
81
+ | `Universal**` | ❌ | ✅ | ❌ |
82
+ | `Reliable` **without** distinguishable standards | ❌ | ✅ | ✅ |
83
+ | `Reliable` **with** distinguishable standards | ✅ | ✅ | ✅ |
84
+ | `License` | LGPL-2.1<br>_restrictive_ | MIT | MPL-1.1<br>_restrictive_ |
85
+ | `Native Python` | ✅ | ✅ | ❌ |
86
+ | `Detect spoken language` | ❌ | ✅ | N/A |
87
+ | `UnicodeDecodeError Safety` | ❌ | ✅ | ❌ |
88
+ | `Whl Size (min)` | 193.6 kB | 42 kB | ~200 kB |
89
+ | `Supported Encoding` | 33 | 🎉 [99](https://charset-normalizer.readthedocs.io/en/latest/user/support.html#supported-encodings) | 40 |
90
+
91
+ <p align="center">
92
+ <img src="https://i.imgflip.com/373iay.gif" alt="Reading Normalized Text" width="226"/><img src="https://media.tenor.com/images/c0180f70732a18b4965448d33adba3d0/tenor.gif" alt="Cat Reading Text" width="200"/>
93
+ </p>
94
+
95
+ *\*\* : They are clearly using specific code for a specific encoding, even if it covers most of the commonly used ones.*<br>
96
+
97
+ ## ⚡ Performance
98
+
99
+ This package offers better performance than its counterpart Chardet. Here are some numbers.
100
+
101
+ | Package | Accuracy | Mean per file (ms) | File per sec (est) |
102
+ |-----------------------------------------------|:--------:|:------------------:|:------------------:|
103
+ | [chardet](https://github.com/chardet/chardet) | 86 % | 63 ms | 16 file/sec |
104
+ | charset-normalizer | **98 %** | **10 ms** | 100 file/sec |
105
+
106
+ | Package | 99th percentile | 95th percentile | 50th percentile |
107
+ |-----------------------------------------------|:---------------:|:---------------:|:---------------:|
108
+ | [chardet](https://github.com/chardet/chardet) | 265 ms | 71 ms | 7 ms |
109
+ | charset-normalizer | 100 ms | 50 ms | 5 ms |
110
+
111
+ _updated as of December 2024 using CPython 3.12_
+
+ Chardet's performance on larger files (1 MB+) is very poor. Expect a huge difference on large payloads.
114
+
115
+ > Stats are generated using 400+ files with default parameters. For details on the files used, see the GHA workflows.
+ > And yes, these results might change at any time. The dataset can be updated to include more files.
+ > The actual delays depend heavily on your CPU capabilities. The factors should remain the same.
+ > Keep in mind that the stats are generous and that Chardet's accuracy versus ours is measured using Chardet's initial
+ > capability (e.g. supported encodings). Challenge them if you want.
120
+
121
+ ## ✨ Installation
122
+
123
+ Using pip:
124
+
125
+ ```sh
126
+ pip install charset-normalizer -U
127
+ ```
128
+
129
+ ## 🚀 Basic Usage
130
+
131
+ ### CLI
132
+ This package comes with a CLI.
133
+
134
+ ```
135
+ usage: normalizer [-h] [-v] [-a] [-n] [-m] [-r] [-f] [-t THRESHOLD]
136
+ file [file ...]
137
+
138
+ The Real First Universal Charset Detector. Discover originating encoding used
139
+ on text file. Normalize text to unicode.
140
+
141
+ positional arguments:
142
+ files File(s) to be analysed
143
+
144
+ optional arguments:
145
+ -h, --help show this help message and exit
146
+ -v, --verbose Display complementary information about file if any.
147
+ Stdout will contain logs about the detection process.
148
+ -a, --with-alternative
149
+ Output complementary possibilities if any. Top-level
150
+ JSON WILL be a list.
151
+ -n, --normalize Permit to normalize input file. If not set, program
152
+ does not write anything.
153
+ -m, --minimal Only output the charset detected to STDOUT. Disabling
154
+ JSON output.
155
+ -r, --replace Replace file when trying to normalize it instead of
156
+ creating a new one.
157
+ -f, --force Replace file without asking if you are sure, use this
158
+ flag with caution.
159
+ -t THRESHOLD, --threshold THRESHOLD
160
+ Define a custom maximum amount of chaos allowed in
161
+ decoded content. 0. <= chaos <= 1.
162
+ --version Show version information and exit.
163
+ ```
164
+
165
+ ```bash
166
+ normalizer ./data/sample.1.fr.srt
167
+ ```
168
+
169
+ or
170
+
171
+ ```bash
172
+ python -m charset_normalizer ./data/sample.1.fr.srt
173
+ ```
174
+
175
+ 🎉 Since version 1.4.0, the CLI produces an easily usable stdout result in JSON format.
176
+
177
+ ```json
178
+ {
179
+ "path": "/home/default/projects/charset_normalizer/data/sample.1.fr.srt",
180
+ "encoding": "cp1252",
181
+ "encoding_aliases": [
182
+ "1252",
183
+ "windows_1252"
184
+ ],
185
+ "alternative_encodings": [
186
+ "cp1254",
187
+ "cp1256",
188
+ "cp1258",
189
+ "iso8859_14",
190
+ "iso8859_15",
191
+ "iso8859_16",
192
+ "iso8859_3",
193
+ "iso8859_9",
194
+ "latin_1",
195
+ "mbcs"
196
+ ],
197
+ "language": "French",
198
+ "alphabets": [
199
+ "Basic Latin",
200
+ "Latin-1 Supplement"
201
+ ],
202
+ "has_sig_or_bom": false,
203
+ "chaos": 0.149,
204
+ "coherence": 97.152,
205
+ "unicode_path": null,
206
+ "is_preferred": true
207
+ }
208
+ ```
209
+
210
+ ### Python
211
+ *Just print out normalized text*
212
+ ```python
213
+ from charset_normalizer import from_path
214
+
215
+ results = from_path('./my_subtitle.srt')
216
+
217
+ print(str(results.best()))
218
+ ```
219
+
220
+ *Upgrade your code without effort*
221
+ ```python
222
+ from charset_normalizer import detect
223
+ ```
224
+
225
+ The above code will behave the same as **chardet**. We ensure that we offer the best (reasonable) backward-compatible result possible.
+
+ See the docs for advanced usage: [readthedocs.io](https://charset-normalizer.readthedocs.io/en/latest/)
+
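To illustrate the drop-in behaviour described above, here is a minimal, illustrative sketch (not taken from the diff; the sample bytes are arbitrary, and the result keys follow the chardet-style `encoding` / `language` / `confidence` convention):

```python
from charset_normalizer import detect

# Arbitrary cp1252-encoded sample; any raw bytes read from a file would do.
payload = "Ceci n'est pas une chaîne ASCII : café, naïveté.".encode("cp1252")

# Same calling convention as chardet.detect(): returns a dict with the
# guessed encoding, the detected language and a confidence score.
result = detect(payload)
print(result["encoding"], result["confidence"])
```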
229
+ ## 😇 Why
230
+
231
+ When I started using Chardet, I noticed that it did not meet my expectations, and I wanted to propose a
+ reliable alternative using a completely different method. Also! I never back down from a good challenge!
+
+ I **don't care** about the **originating charset** encoding, because **two different tables** can
+ produce **two identical rendered strings.**
+ What I want is to get readable text, the best I can.
+
+ In a way, **I'm brute-forcing text decoding.** How cool is that? 😎
239
+
240
+ Don't confuse the package **ftfy** with charset-normalizer or chardet. ftfy's goal is to repair Unicode strings, whereas charset-normalizer converts a raw file in an unknown encoding to Unicode.
241
+
242
+ ## 🍰 How
243
+
244
+ - Discard all charset encoding tables that could not fit the binary content.
+ - Measure the noise, or mess, once the content is opened (in chunks) with a candidate charset encoding.
+ - Extract the matches with the lowest mess detected.
+ - Additionally, measure coherence / probe for a language.
248
+
249
+ **Wait a minute**, what are noise/mess and coherence according to **YOU**?
250
+
251
+ *Noise:* I opened hundreds of text files, **written by humans**, with the wrong encoding table. **I observed**, then
+ **I established** some ground rules about **what is obvious** when **it seems like** a mess (i.e. defining noise in rendered text).
+ I know that my interpretation of noise is probably incomplete; feel free to contribute in order to
+ improve or rewrite it.
+
+ *Coherence:* For each language on earth, we have computed ranked letter-appearance frequencies (as best we can). I figured
+ that intel is worth something here, so I use those records against decoded text to check whether I can detect intelligent design. A rough sketch of this scoring loop is shown below.
+
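A deliberately simplified, illustrative sketch of the pipeline described above (not the library's actual implementation; `mess_score` and `guess_encoding` are hypothetical helpers, and the real mess/coherence measures are far more elaborate):

```python
import codecs

def mess_score(text: str) -> float:
    """Hypothetical stand-in: fraction of characters that look suspicious
    (unprintable, excluding ordinary whitespace) in the decoded text."""
    suspicious = sum(1 for ch in text if not ch.isprintable() and ch not in "\r\n\t")
    return suspicious / max(len(text), 1)

def guess_encoding(payload: bytes, candidates=("utf_8", "cp1252", "latin_1")):
    """Try each candidate table, discard those that cannot decode the bytes,
    then keep the decoding with the lowest mess."""
    matches = []
    for enc in candidates:
        try:
            text = codecs.decode(payload, enc)
        except (UnicodeDecodeError, LookupError):
            continue  # the table does not fit the binary content: discard it
        matches.append((mess_score(text), enc))
    if not matches:
        return None
    matches.sort(key=lambda m: m[0])  # lowest mess wins
    # A real detector would additionally score coherence (letter frequencies
    # per language) to break ties between equally "quiet" decodings.
    return matches[0][1]

print(guess_encoding("héllo wörld".encode("cp1252")))
```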
259
+ ## ⚡ Known limitations
260
+
261
+ - Language detection is unreliable when the text contains two or more languages sharing identical letters (e.g. HTML with English tags plus Turkish content, both sharing Latin characters).
+ - Every charset detector heavily depends on sufficient content. In common cases, do not bother running detection on very tiny content.
263
+
264
+ ## ⚠️ About Python EOLs
265
+
266
+ **If you are running:**
267
+
268
+ - Python >=2.7,<3.5: Unsupported
269
+ - Python 3.5: charset-normalizer < 2.1
270
+ - Python 3.6: charset-normalizer < 3.1
271
+ - Python 3.7: charset-normalizer < 4.0
272
+
273
+ Upgrade your Python interpreter as soon as possible.
274
+
275
+ ## 👤 Contributing
276
+
277
+ Contributions, issues and feature requests are very much welcome.<br />
278
+ Feel free to check [issues page](https://github.com/ousret/charset_normalizer/issues) if you want to contribute.
279
+
280
+ ## 📝 License
281
+
282
+ Copyright © [Ahmed TAHRI @Ousret](https://github.com/Ousret).<br />
283
+ This project is [MIT](https://github.com/Ousret/charset_normalizer/blob/master/LICENSE) licensed.
284
+
285
+ Characters frequencies used in this project © 2012 [Denny Vrandečić](http://simia.net/letters/)
286
+
287
+ ## 💼 For Enterprise
288
+
289
+ Professional support for charset-normalizer is available as part of the [Tidelift
290
+ Subscription][1]. Tidelift gives software development teams a single source for
291
+ purchasing and maintaining their software, with professional grade assurances
292
+ from the experts who know it best, while seamlessly integrating with existing
293
+ tools.
294
+
295
+ [1]: https://tidelift.com/subscription/pkg/pypi-charset-normalizer?utm_source=pypi-charset-normalizer&utm_medium=readme
296
+
297
+ [![OpenSSF Best Practices](https://www.bestpractices.dev/projects/7297/badge)](https://www.bestpractices.dev/projects/7297)
298
+
299
+ # Changelog
300
+ All notable changes to charset-normalizer will be documented in this file. This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
301
+ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
302
+
303
+ ## [3.4.3](https://github.com/Ousret/charset_normalizer/compare/3.4.2...3.4.3) (2025-08-09)
304
+
305
+ ### Changed
306
+ - mypy(c) is no longer a required dependency at build time if `CHARSET_NORMALIZER_USE_MYPYC` isn't set to `1`. (#595) (#583)
307
+ - automatically lower confidence on small bytes samples that are not Unicode in `detect` output legacy function. (#391)
308
+
309
+ ### Added
310
+ - Custom build backend to overcome inability to mark mypy as an optional dependency in the build phase.
311
+ - Support for Python 3.14
312
+
313
+ ### Fixed
314
+ - sdist archive contained useless directories.
315
+ - automatically fallback on valid UTF-16 or UTF-32 even if the md says it's noisy. (#633)
316
+
317
+ ### Misc
318
+ - SBOM are automatically published to the relevant GitHub release to comply with regulatory changes.
319
+ Each published wheel comes with its SBOM. We choose CycloneDX as the format.
320
+ - Prebuilt optimized wheel are no longer distributed by default for CPython 3.7 due to a change in cibuildwheel.
321
+
322
+ ## [3.4.2](https://github.com/Ousret/charset_normalizer/compare/3.4.1...3.4.2) (2025-05-02)
323
+
324
+ ### Fixed
325
+ - Addressed the DeprecationWarning in our CLI regarding `argparse.FileType` by backporting the target class into the package. (#591)
326
+ - Improved the overall reliability of the detector with CJK Ideographs. (#605) (#587)
327
+
328
+ ### Changed
329
+ - Optional mypyc compilation upgraded to version 1.15 for Python >= 3.8
330
+
331
+ ## [3.4.1](https://github.com/Ousret/charset_normalizer/compare/3.4.0...3.4.1) (2024-12-24)
332
+
333
+ ### Changed
334
+ - Project metadata are now stored using `pyproject.toml` instead of `setup.cfg` using setuptools as the build backend.
335
+ - Enforce annotation delayed loading for a simpler and consistent types in the project.
336
+ - Optional mypyc compilation upgraded to version 1.14 for Python >= 3.8
337
+
338
+ ### Added
339
+ - pre-commit configuration.
340
+ - noxfile.
341
+
342
+ ### Removed
343
+ - `build-requirements.txt` as per using `pyproject.toml` native build configuration.
344
+ - `bin/integration.py` and `bin/serve.py` in favor of downstream integration test (see noxfile).
345
+ - `setup.cfg` in favor of `pyproject.toml` metadata configuration.
346
+ - Unused `utils.range_scan` function.
347
+
348
+ ### Fixed
349
+ - Converting content to Unicode bytes may insert `utf_8` instead of preferred `utf-8`. (#572)
350
+ - Deprecation warning "'count' is passed as positional argument" when converting to Unicode bytes on Python 3.13+
351
+
352
+ ## [3.4.0](https://github.com/Ousret/charset_normalizer/compare/3.3.2...3.4.0) (2024-10-08)
353
+
354
+ ### Added
355
+ - Argument `--no-preemptive` in the CLI to prevent the detector to search for hints.
356
+ - Support for Python 3.13 (#512)
357
+
358
+ ### Fixed
359
+ - Relax the TypeError exception thrown when trying to compare a CharsetMatch with anything else than a CharsetMatch.
360
+ - Improved the general reliability of the detector based on user feedbacks. (#520) (#509) (#498) (#407) (#537)
361
+ - Declared charset in content (preemptive detection) not changed when converting to utf-8 bytes. (#381)
362
+
363
+ ## [3.3.2](https://github.com/Ousret/charset_normalizer/compare/3.3.1...3.3.2) (2023-10-31)
364
+
365
+ ### Fixed
366
+ - Unintentional memory usage regression when using large payload that match several encoding (#376)
367
+ - Regression on some detection case showcased in the documentation (#371)
368
+
369
+ ### Added
370
+ - Noise (md) probe that identify malformed arabic representation due to the presence of letters in isolated form (credit to my wife)
371
+
372
+ ## [3.3.1](https://github.com/Ousret/charset_normalizer/compare/3.3.0...3.3.1) (2023-10-22)
373
+
374
+ ### Changed
375
+ - Optional mypyc compilation upgraded to version 1.6.1 for Python >= 3.8
376
+ - Improved the general detection reliability based on reports from the community
377
+
378
+ ## [3.3.0](https://github.com/Ousret/charset_normalizer/compare/3.2.0...3.3.0) (2023-09-30)
379
+
380
+ ### Added
381
+ - Allow to execute the CLI (e.g. normalizer) through `python -m charset_normalizer.cli` or `python -m charset_normalizer`
382
+ - Support for 9 forgotten encoding that are supported by Python but unlisted in `encoding.aliases` as they have no alias (#323)
383
+
384
+ ### Removed
385
+ - (internal) Redundant utils.is_ascii function and unused function is_private_use_only
386
+ - (internal) charset_normalizer.assets is moved inside charset_normalizer.constant
387
+
388
+ ### Changed
389
+ - (internal) Unicode code blocks in constants are updated using the latest v15.0.0 definition to improve detection
390
+ - Optional mypyc compilation upgraded to version 1.5.1 for Python >= 3.8
391
+
392
+ ### Fixed
393
+ - Unable to properly sort CharsetMatch when both chaos/noise and coherence were close due to an unreachable condition in \_\_lt\_\_ (#350)
394
+
395
+ ## [3.2.0](https://github.com/Ousret/charset_normalizer/compare/3.1.0...3.2.0) (2023-06-07)
396
+
397
+ ### Changed
398
+ - Typehint for function `from_path` no longer enforce `PathLike` as its first argument
399
+ - Minor improvement over the global detection reliability
400
+
401
+ ### Added
402
+ - Introduce function `is_binary` that relies on main capabilities, and optimized to detect binaries
403
+ - Propagate `enable_fallback` argument throughout `from_bytes`, `from_path`, and `from_fp` that allow a deeper control over the detection (default True)
404
+ - Explicit support for Python 3.12
405
+
406
+ ### Fixed
407
+ - Edge case detection failure where a file would contain 'very-long' camel cased word (Issue #289)
408
+
409
+ ## [3.1.0](https://github.com/Ousret/charset_normalizer/compare/3.0.1...3.1.0) (2023-03-06)
410
+
411
+ ### Added
412
+ - Argument `should_rename_legacy` for legacy function `detect` and disregard any new arguments without errors (PR #262)
413
+
414
+ ### Removed
415
+ - Support for Python 3.6 (PR #260)
416
+
417
+ ### Changed
418
+ - Optional speedup provided by mypy/c 1.0.1
419
+
420
+ ## [3.0.1](https://github.com/Ousret/charset_normalizer/compare/3.0.0...3.0.1) (2022-11-18)
421
+
422
+ ### Fixed
423
+ - Multi-bytes cutter/chunk generator did not always cut correctly (PR #233)
424
+
425
+ ### Changed
426
+ - Speedup provided by mypy/c 0.990 on Python >= 3.7
427
+
428
+ ## [3.0.0](https://github.com/Ousret/charset_normalizer/compare/2.1.1...3.0.0) (2022-10-20)
429
+
430
+ ### Added
431
+ - Extend the capability of explain=True when cp_isolation contains at most two entries (min one), will log in details of the Mess-detector results
432
+ - Support for alternative language frequency set in charset_normalizer.assets.FREQUENCIES
433
+ - Add parameter `language_threshold` in `from_bytes`, `from_path` and `from_fp` to adjust the minimum expected coherence ratio
434
+ - `normalizer --version` now specify if current version provide extra speedup (meaning mypyc compilation whl)
435
+
436
+ ### Changed
437
+ - Build with static metadata using 'build' frontend
438
+ - Make the language detection stricter
439
+ - Optional: Module `md.py` can be compiled using Mypyc to provide an extra speedup up to 4x faster than v2.1
440
+
441
+ ### Fixed
442
+ - CLI with opt --normalize fail when using full path for files
443
+ - TooManyAccentuatedPlugin induce false positive on the mess detection when too few alpha character have been fed to it
444
+ - Sphinx warnings when generating the documentation
445
+
446
+ ### Removed
447
+ - Coherence detector no longer return 'Simple English' instead return 'English'
448
+ - Coherence detector no longer return 'Classical Chinese' instead return 'Chinese'
449
+ - Breaking: Method `first()` and `best()` from CharsetMatch
450
+ - UTF-7 will no longer appear as "detected" without a recognized SIG/mark (is unreliable/conflict with ASCII)
451
+ - Breaking: Class aliases CharsetDetector, CharsetDoctor, CharsetNormalizerMatch and CharsetNormalizerMatches
452
+ - Breaking: Top-level function `normalize`
453
+ - Breaking: Properties `chaos_secondary_pass`, `coherence_non_latin` and `w_counter` from CharsetMatch
454
+ - Support for the backport `unicodedata2`
455
+
456
+ ## [3.0.0rc1](https://github.com/Ousret/charset_normalizer/compare/3.0.0b2...3.0.0rc1) (2022-10-18)
457
+
458
+ ### Added
459
+ - Extend the capability of explain=True when cp_isolation contains at most two entries (min one), will log in details of the Mess-detector results
460
+ - Support for alternative language frequency set in charset_normalizer.assets.FREQUENCIES
461
+ - Add parameter `language_threshold` in `from_bytes`, `from_path` and `from_fp` to adjust the minimum expected coherence ratio
462
+
463
+ ### Changed
464
+ - Build with static metadata using 'build' frontend
465
+ - Make the language detection stricter
466
+
467
+ ### Fixed
468
+ - CLI with opt --normalize fail when using full path for files
469
+ - TooManyAccentuatedPlugin induce false positive on the mess detection when too few alpha character have been fed to it
470
+
471
+ ### Removed
472
+ - Coherence detector no longer return 'Simple English' instead return 'English'
473
+ - Coherence detector no longer return 'Classical Chinese' instead return 'Chinese'
474
+
475
+ ## [3.0.0b2](https://github.com/Ousret/charset_normalizer/compare/3.0.0b1...3.0.0b2) (2022-08-21)
476
+
477
+ ### Added
478
+ - `normalizer --version` now specify if current version provide extra speedup (meaning mypyc compilation whl)
479
+
480
+ ### Removed
481
+ - Breaking: Method `first()` and `best()` from CharsetMatch
482
+ - UTF-7 will no longer appear as "detected" without a recognized SIG/mark (is unreliable/conflict with ASCII)
483
+
484
+ ### Fixed
485
+ - Sphinx warnings when generating the documentation
486
+
487
+ ## [3.0.0b1](https://github.com/Ousret/charset_normalizer/compare/2.1.0...3.0.0b1) (2022-08-15)
488
+
489
+ ### Changed
490
+ - Optional: Module `md.py` can be compiled using Mypyc to provide an extra speedup up to 4x faster than v2.1
491
+
492
+ ### Removed
493
+ - Breaking: Class aliases CharsetDetector, CharsetDoctor, CharsetNormalizerMatch and CharsetNormalizerMatches
494
+ - Breaking: Top-level function `normalize`
495
+ - Breaking: Properties `chaos_secondary_pass`, `coherence_non_latin` and `w_counter` from CharsetMatch
496
+ - Support for the backport `unicodedata2`
497
+
498
+ ## [2.1.1](https://github.com/Ousret/charset_normalizer/compare/2.1.0...2.1.1) (2022-08-19)
499
+
500
+ ### Deprecated
501
+ - Function `normalize` scheduled for removal in 3.0
502
+
503
+ ### Changed
504
+ - Removed useless call to decode in fn is_unprintable (#206)
505
+
506
+ ### Fixed
507
+ - Third-party library (i18n xgettext) crashing not recognizing utf_8 (PEP 263) with underscore from [@aleksandernovikov](https://github.com/aleksandernovikov) (#204)
508
+
509
+ ## [2.1.0](https://github.com/Ousret/charset_normalizer/compare/2.0.12...2.1.0) (2022-06-19)
510
+
511
+ ### Added
512
+ - Output the Unicode table version when running the CLI with `--version` (PR #194)
513
+
514
+ ### Changed
515
+ - Re-use decoded buffer for single byte character sets from [@nijel](https://github.com/nijel) (PR #175)
516
+ - Fixing some performance bottlenecks from [@deedy5](https://github.com/deedy5) (PR #183)
517
+
518
+ ### Fixed
519
+ - Workaround potential bug in cpython with Zero Width No-Break Space located in Arabic Presentation Forms-B, Unicode 1.1 not acknowledged as space (PR #175)
520
+ - CLI default threshold aligned with the API threshold from [@oleksandr-kuzmenko](https://github.com/oleksandr-kuzmenko) (PR #181)
521
+
522
+ ### Removed
523
+ - Support for Python 3.5 (PR #192)
524
+
525
+ ### Deprecated
526
+ - Use of backport unicodedata from `unicodedata2` as Python is quickly catching up, scheduled for removal in 3.0 (PR #194)
527
+
528
+ ## [2.0.12](https://github.com/Ousret/charset_normalizer/compare/2.0.11...2.0.12) (2022-02-12)
529
+
530
+ ### Fixed
531
+ - ASCII miss-detection on rare cases (PR #170)
532
+
533
+ ## [2.0.11](https://github.com/Ousret/charset_normalizer/compare/2.0.10...2.0.11) (2022-01-30)
534
+
535
+ ### Added
536
+ - Explicit support for Python 3.11 (PR #164)
537
+
538
+ ### Changed
539
+ - The logging behavior have been completely reviewed, now using only TRACE and DEBUG levels (PR #163 #165)
540
+
541
+ ## [2.0.10](https://github.com/Ousret/charset_normalizer/compare/2.0.9...2.0.10) (2022-01-04)
542
+
543
+ ### Fixed
544
+ - Fallback match entries might lead to UnicodeDecodeError for large bytes sequence (PR #154)
545
+
546
+ ### Changed
547
+ - Skipping the language-detection (CD) on ASCII (PR #155)
548
+
549
+ ## [2.0.9](https://github.com/Ousret/charset_normalizer/compare/2.0.8...2.0.9) (2021-12-03)
550
+
551
+ ### Changed
552
+ - Moderating the logging impact (since 2.0.8) for specific environments (PR #147)
553
+
554
+ ### Fixed
555
+ - Wrong logging level applied when setting kwarg `explain` to True (PR #146)
556
+
557
+ ## [2.0.8](https://github.com/Ousret/charset_normalizer/compare/2.0.7...2.0.8) (2021-11-24)
558
+ ### Changed
559
+ - Improvement over Vietnamese detection (PR #126)
560
+ - MD improvement on trailing data and long foreign (non-pure latin) data (PR #124)
561
+ - Efficiency improvements in cd/alphabet_languages from [@adbar](https://github.com/adbar) (PR #122)
562
+ - call sum() without an intermediary list following PEP 289 recommendations from [@adbar](https://github.com/adbar) (PR #129)
563
+ - Code style as refactored by Sourcery-AI (PR #131)
564
+ - Minor adjustment on the MD around european words (PR #133)
565
+ - Remove and replace SRTs from assets / tests (PR #139)
566
+ - Initialize the library logger with a `NullHandler` by default from [@nmaynes](https://github.com/nmaynes) (PR #135)
567
+ - Setting kwarg `explain` to True will add provisionally (bounded to function lifespan) a specific stream handler (PR #135)
568
+
569
+ ### Fixed
570
+ - Fix large (misleading) sequence giving UnicodeDecodeError (PR #137)
571
+ - Avoid using too insignificant chunk (PR #137)
572
+
573
+ ### Added
574
+ - Add and expose function `set_logging_handler` to configure a specific StreamHandler from [@nmaynes](https://github.com/nmaynes) (PR #135)
575
+ - Add `CHANGELOG.md` entries, format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/) (PR #141)
576
+
577
+ ## [2.0.7](https://github.com/Ousret/charset_normalizer/compare/2.0.6...2.0.7) (2021-10-11)
578
+ ### Added
579
+ - Add support for Kazakh (Cyrillic) language detection (PR #109)
580
+
581
+ ### Changed
582
+ - Further, improve inferring the language from a given single-byte code page (PR #112)
583
+ - Vainly trying to leverage PEP263 when PEP3120 is not supported (PR #116)
584
+ - Refactoring for potential performance improvements in loops from [@adbar](https://github.com/adbar) (PR #113)
585
+ - Various detection improvement (MD+CD) (PR #117)
586
+
587
+ ### Removed
588
+ - Remove redundant logging entry about detected language(s) (PR #115)
589
+
590
+ ### Fixed
591
+ - Fix a minor inconsistency between Python 3.5 and other versions regarding language detection (PR #117 #102)
592
+
593
+ ## [2.0.6](https://github.com/Ousret/charset_normalizer/compare/2.0.5...2.0.6) (2021-09-18)
594
+ ### Fixed
595
+ - Unforeseen regression with the loss of the backward-compatibility with some older minor of Python 3.5.x (PR #100)
596
+ - Fix CLI crash when using --minimal output in certain cases (PR #103)
597
+
598
+ ### Changed
599
+ - Minor improvement to the detection efficiency (less than 1%) (PR #106 #101)
600
+
601
+ ## [2.0.5](https://github.com/Ousret/charset_normalizer/compare/2.0.4...2.0.5) (2021-09-14)
602
+ ### Changed
603
+ - The project now comply with: flake8, mypy, isort and black to ensure a better overall quality (PR #81)
604
+ - The BC-support with v1.x was improved, the old staticmethods are restored (PR #82)
605
+ - The Unicode detection is slightly improved (PR #93)
606
+ - Add syntax sugar \_\_bool\_\_ for results CharsetMatches list-container (PR #91)
607
+
608
+ ### Removed
609
+ - The project no longer raise warning on tiny content given for detection, will be simply logged as warning instead (PR #92)
610
+
611
+ ### Fixed
612
+ - In some rare case, the chunks extractor could cut in the middle of a multi-byte character and could mislead the mess detection (PR #95)
613
+ - Some rare 'space' characters could trip up the UnprintablePlugin/Mess detection (PR #96)
614
+ - The MANIFEST.in was not exhaustive (PR #78)
615
+
616
+ ## [2.0.4](https://github.com/Ousret/charset_normalizer/compare/2.0.3...2.0.4) (2021-07-30)
617
+ ### Fixed
618
+ - The CLI no longer raise an unexpected exception when no encoding has been found (PR #70)
619
+ - Fix accessing the 'alphabets' property when the payload contains surrogate characters (PR #68)
620
+ - The logger could mislead (explain=True) on detected languages and the impact of one MBCS match (PR #72)
621
+ - Submatch factoring could be wrong in rare edge cases (PR #72)
622
+ - Multiple files given to the CLI were ignored when publishing results to STDOUT. (After the first path) (PR #72)
623
+ - Fix line endings from CRLF to LF for certain project files (PR #67)
624
+
625
+ ### Changed
626
+ - Adjust the MD to lower the sensitivity, thus improving the global detection reliability (PR #69 #76)
627
+ - Allow fallback on specified encoding if any (PR #71)
628
+
629
+ ## [2.0.3](https://github.com/Ousret/charset_normalizer/compare/2.0.2...2.0.3) (2021-07-16)
630
+ ### Changed
631
+ - Part of the detection mechanism has been improved to be less sensitive, resulting in more accurate detection results. Especially ASCII. (PR #63)
632
+ - According to the community wishes, the detection will fall back on ASCII or UTF-8 in a last-resort case. (PR #64)
633
+
634
+ ## [2.0.2](https://github.com/Ousret/charset_normalizer/compare/2.0.1...2.0.2) (2021-07-15)
635
+ ### Fixed
636
+ - Empty/Too small JSON payload miss-detection fixed. Report from [@tseaver](https://github.com/tseaver) (PR #59)
637
+
638
+ ### Changed
639
+ - Don't inject unicodedata2 into sys.modules from [@akx](https://github.com/akx) (PR #57)
640
+
641
+ ## [2.0.1](https://github.com/Ousret/charset_normalizer/compare/2.0.0...2.0.1) (2021-07-13)
642
+ ### Fixed
643
+ - Make it work where there isn't a filesystem available, dropping assets frequencies.json. Report from [@sethmlarson](https://github.com/sethmlarson). (PR #55)
644
+ - Using explain=False permanently disable the verbose output in the current runtime (PR #47)
645
+ - One log entry (language target preemptive) was not show in logs when using explain=True (PR #47)
646
+ - Fix undesired exception (ValueError) on getitem of instance CharsetMatches (PR #52)
647
+
648
+ ### Changed
649
+ - Public function normalize default args values were not aligned with from_bytes (PR #53)
650
+
651
+ ### Added
652
+ - You may now use charset aliases in cp_isolation and cp_exclusion arguments (PR #47)
653
+
654
+ ## [2.0.0](https://github.com/Ousret/charset_normalizer/compare/1.4.1...2.0.0) (2021-07-02)
655
+ ### Changed
656
+ - 4x to 5 times faster than the previous 1.4.0 release. At least 2x faster than Chardet.
657
+ - Accent has been made on UTF-8 detection, should perform rather instantaneous.
658
+ - The backward compatibility with Chardet has been greatly improved. The legacy detect function returns an identical charset name whenever possible.
659
+ - The detection mechanism has been slightly improved, now Turkish content is detected correctly (most of the time)
660
+ - The program has been rewritten to ease the readability and maintainability. (+Using static typing)+
661
+ - utf_7 detection has been reinstated.
662
+
663
+ ### Removed
664
+ - This package no longer require anything when used with Python 3.5 (Dropped cached_property)
665
+ - Removed support for these languages: Catalan, Esperanto, Kazakh, Baque, Volapük, Azeri, Galician, Nynorsk, Macedonian, and Serbocroatian.
666
+ - The exception hook on UnicodeDecodeError has been removed.
667
+
668
+ ### Deprecated
669
+ - Methods coherence_non_latin, w_counter, chaos_secondary_pass of the class CharsetMatch are now deprecated and scheduled for removal in v3.0
670
+
671
+ ### Fixed
672
+ - The CLI output used the relative path of the file(s). Should be absolute.
673
+
674
+ ## [1.4.1](https://github.com/Ousret/charset_normalizer/compare/1.4.0...1.4.1) (2021-05-28)
675
+ ### Fixed
676
+ - Logger configuration/usage no longer conflict with others (PR #44)
677
+
678
+ ## [1.4.0](https://github.com/Ousret/charset_normalizer/compare/1.3.9...1.4.0) (2021-05-21)
679
+ ### Removed
680
+ - Using standard logging instead of using the package loguru.
681
+ - Dropping nose test framework in favor of the maintained pytest.
682
+ - Choose to not use dragonmapper package to help with gibberish Chinese/CJK text.
683
+ - Require cached_property only for Python 3.5 due to constraint. Dropping for every other interpreter version.
684
+ - Stop support for UTF-7 that does not contain a SIG.
685
+ - Dropping PrettyTable, replaced with pure JSON output in CLI.
686
+
687
+ ### Fixed
688
+ - BOM marker in a CharsetNormalizerMatch instance could be False in rare cases even if obviously present. Due to the sub-match factoring process.
689
+ - Not searching properly for the BOM when trying utf32/16 parent codec.
690
+
691
+ ### Changed
692
+ - Improving the package final size by compressing frequencies.json.
693
+ - Huge improvement over the larges payload.
694
+
695
+ ### Added
696
+ - CLI now produces JSON consumable output.
697
+ - Return ASCII if given sequences fit. Given reasonable confidence.
698
+
699
+ ## [1.3.9](https://github.com/Ousret/charset_normalizer/compare/1.3.8...1.3.9) (2021-05-13)
700
+
701
+ ### Fixed
702
+ - In some very rare cases, you may end up getting encode/decode errors due to a bad bytes payload (PR #40)
703
+
704
+ ## [1.3.8](https://github.com/Ousret/charset_normalizer/compare/1.3.7...1.3.8) (2021-05-12)
705
+
706
+ ### Fixed
707
+ - Empty given payload for detection may cause an exception if trying to access the `alphabets` property. (PR #39)
708
+
709
+ ## [1.3.7](https://github.com/Ousret/charset_normalizer/compare/1.3.6...1.3.7) (2021-05-12)
710
+
711
+ ### Fixed
712
+ - The legacy detect function should return UTF-8-SIG if sig is present in the payload. (PR #38)
713
+
714
+ ## [1.3.6](https://github.com/Ousret/charset_normalizer/compare/1.3.5...1.3.6) (2021-02-09)
715
+
716
+ ### Changed
717
+ - Amend the previous release to allow prettytable 2.0 (PR #35)
718
+
719
+ ## [1.3.5](https://github.com/Ousret/charset_normalizer/compare/1.3.4...1.3.5) (2021-02-08)
720
+
721
+ ### Fixed
722
+ - Fix error while using the package with a python pre-release interpreter (PR #33)
723
+
724
+ ### Changed
725
+ - Dependencies refactoring, constraints revised.
726
+
727
+ ### Added
728
+ - Add python 3.9 and 3.10 to the supported interpreters
729
+
730
+ MIT License
731
+
732
+ Copyright (c) 2025 TAHRI Ahmed R.
733
+
734
+ Permission is hereby granted, free of charge, to any person obtaining a copy
735
+ of this software and associated documentation files (the "Software"), to deal
736
+ in the Software without restriction, including without limitation the rights
737
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
738
+ copies of the Software, and to permit persons to whom the Software is
739
+ furnished to do so, subject to the following conditions:
740
+
741
+ The above copyright notice and this permission notice shall be included in all
742
+ copies or substantial portions of the Software.
743
+
744
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
745
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
746
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
747
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
748
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
749
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
750
+ SOFTWARE.
venv/lib/python3.13/site-packages/charset_normalizer-3.4.3.dist-info/RECORD ADDED
@@ -0,0 +1,35 @@
+ ../../../bin/normalizer,sha256=je4pIiFxpVHCrwvmwGMBG59uOGIlEvBU6MtSPdwGxZg,252
2
+ charset_normalizer-3.4.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
3
+ charset_normalizer-3.4.3.dist-info/METADATA,sha256=nBNOskPUtcqHtaSPPaJafjXrlicPcPIgLFzpJQTgvaA,36700
4
+ charset_normalizer-3.4.3.dist-info/RECORD,,
5
+ charset_normalizer-3.4.3.dist-info/WHEEL,sha256=YOKbiIEZep2WTRrpKV0dvEfsSeIOVH2NqduVxoKQt6I,142
6
+ charset_normalizer-3.4.3.dist-info/entry_points.txt,sha256=ADSTKrkXZ3hhdOVFi6DcUEHQRS0xfxDIE_pEz4wLIXA,65
7
+ charset_normalizer-3.4.3.dist-info/licenses/LICENSE,sha256=bQ1Bv-FwrGx9wkjJpj4lTQ-0WmDVCoJX0K-SxuJJuIc,1071
8
+ charset_normalizer-3.4.3.dist-info/top_level.txt,sha256=7ASyzePr8_xuZWJsnqJjIBtyV8vhEo0wBCv1MPRRi3Q,19
9
+ charset_normalizer/__init__.py,sha256=OKRxRv2Zhnqk00tqkN0c1BtJjm165fWXLydE52IKuHc,1590
10
+ charset_normalizer/__main__.py,sha256=yzYxMR-IhKRHYwcSlavEv8oGdwxsR89mr2X09qXGdps,109
11
+ charset_normalizer/__pycache__/__init__.cpython-313.pyc,,
12
+ charset_normalizer/__pycache__/__main__.cpython-313.pyc,,
13
+ charset_normalizer/__pycache__/api.cpython-313.pyc,,
14
+ charset_normalizer/__pycache__/cd.cpython-313.pyc,,
15
+ charset_normalizer/__pycache__/constant.cpython-313.pyc,,
16
+ charset_normalizer/__pycache__/legacy.cpython-313.pyc,,
17
+ charset_normalizer/__pycache__/md.cpython-313.pyc,,
18
+ charset_normalizer/__pycache__/models.cpython-313.pyc,,
19
+ charset_normalizer/__pycache__/utils.cpython-313.pyc,,
20
+ charset_normalizer/__pycache__/version.cpython-313.pyc,,
21
+ charset_normalizer/api.py,sha256=V07i8aVeCD8T2fSia3C-fn0i9t8qQguEBhsqszg32Ns,22668
22
+ charset_normalizer/cd.py,sha256=WKTo1HDb-H9HfCDc3Bfwq5jzS25Ziy9SE2a74SgTq88,12522
23
+ charset_normalizer/cli/__init__.py,sha256=D8I86lFk2-py45JvqxniTirSj_sFyE6sjaY_0-G1shc,136
24
+ charset_normalizer/cli/__main__.py,sha256=dMaXG6IJXRvqq8z2tig7Qb83-BpWTln55ooiku5_uvg,12646
25
+ charset_normalizer/cli/__pycache__/__init__.cpython-313.pyc,,
26
+ charset_normalizer/cli/__pycache__/__main__.cpython-313.pyc,,
27
+ charset_normalizer/constant.py,sha256=7UVY4ldYhmQMHUdgQ_sgZmzcQ0xxYxpBunqSZ-XJZ8U,42713
28
+ charset_normalizer/legacy.py,sha256=sYBzSpzsRrg_wF4LP536pG64BItw7Tqtc3SMQAHvFLM,2731
29
+ charset_normalizer/md.cpython-313-darwin.so,sha256=ChECUH0FslhnG1PYcO5o6aM-2oZCkaUWT25s4jvxGfg,115744
30
+ charset_normalizer/md.py,sha256=-_oN3h3_X99nkFfqamD3yu45DC_wfk5odH0Tr_CQiXs,20145
31
+ charset_normalizer/md__mypyc.cpython-313-darwin.so,sha256=NFFZy-3yqs4pTfNsXB99fROI71j98NFkAXaFZ1yle80,532824
32
+ charset_normalizer/models.py,sha256=lKXhOnIPtiakbK3i__J9wpOfzx3JDTKj7Dn3Rg0VaRI,12394
33
+ charset_normalizer/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
34
+ charset_normalizer/utils.py,sha256=sTejPgrdlNsKNucZfJCxJ95lMTLA0ShHLLE3n5wpT9Q,12170
35
+ charset_normalizer/version.py,sha256=hBN3id1io4HMVPtyDn9IIRVShbBM0kgVs3haVtppZOE,115
venv/lib/python3.13/site-packages/charset_normalizer-3.4.3.dist-info/WHEEL ADDED
@@ -0,0 +1,6 @@
+ Wheel-Version: 1.0
+ Generator: setuptools (80.9.0)
+ Root-Is-Purelib: false
+ Tag: cp313-cp313-macosx_10_13_universal2
+ Generator: delocate 0.13.0
+
venv/lib/python3.13/site-packages/charset_normalizer-3.4.3.dist-info/entry_points.txt ADDED
@@ -0,0 +1,2 @@
+ [console_scripts]
+ normalizer = charset_normalizer.cli:cli_detect
venv/lib/python3.13/site-packages/charset_normalizer-3.4.3.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
+ charset_normalizer
venv/lib/python3.13/site-packages/filelock-3.19.1.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
+ pip
venv/lib/python3.13/site-packages/filelock-3.19.1.dist-info/METADATA ADDED
@@ -0,0 +1,42 @@
+ Metadata-Version: 2.4
2
+ Name: filelock
3
+ Version: 3.19.1
4
+ Summary: A platform independent file lock.
5
+ Project-URL: Documentation, https://py-filelock.readthedocs.io
6
+ Project-URL: Homepage, https://github.com/tox-dev/py-filelock
7
+ Project-URL: Source, https://github.com/tox-dev/py-filelock
8
+ Project-URL: Tracker, https://github.com/tox-dev/py-filelock/issues
9
+ Maintainer-email: Bernát Gábor <gaborjbernat@gmail.com>
10
+ License-Expression: Unlicense
11
+ License-File: LICENSE
12
+ Keywords: application,cache,directory,log,user
13
+ Classifier: Development Status :: 5 - Production/Stable
14
+ Classifier: Intended Audience :: Developers
15
+ Classifier: License :: OSI Approved :: The Unlicense (Unlicense)
16
+ Classifier: Operating System :: OS Independent
17
+ Classifier: Programming Language :: Python
18
+ Classifier: Programming Language :: Python :: 3 :: Only
19
+ Classifier: Programming Language :: Python :: 3.9
20
+ Classifier: Programming Language :: Python :: 3.10
21
+ Classifier: Programming Language :: Python :: 3.11
22
+ Classifier: Programming Language :: Python :: 3.12
23
+ Classifier: Programming Language :: Python :: 3.13
24
+ Classifier: Topic :: Internet
25
+ Classifier: Topic :: Software Development :: Libraries
26
+ Classifier: Topic :: System
27
+ Requires-Python: >=3.9
28
+ Description-Content-Type: text/markdown
29
+
30
+ # filelock
31
+
32
+ [![PyPI](https://img.shields.io/pypi/v/filelock)](https://pypi.org/project/filelock/)
33
+ [![Supported Python
34
+ versions](https://img.shields.io/pypi/pyversions/filelock.svg)](https://pypi.org/project/filelock/)
35
+ [![Documentation
36
+ status](https://readthedocs.org/projects/py-filelock/badge/?version=latest)](https://py-filelock.readthedocs.io/en/latest/?badge=latest)
37
+ [![Code style:
38
+ black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black)
39
+ [![Downloads](https://static.pepy.tech/badge/filelock/month)](https://pepy.tech/project/filelock)
40
+ [![check](https://github.com/tox-dev/py-filelock/actions/workflows/check.yaml/badge.svg)](https://github.com/tox-dev/py-filelock/actions/workflows/check.yaml)
41
+
42
+ For more information, check out the [official documentation](https://py-filelock.readthedocs.io/en/latest/index.html).
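Since this vendored README gives no usage at all, here is a minimal sketch of the lock's canonical context-manager pattern (illustrative only; the lock-file name and timeout are arbitrary):

```python
from filelock import FileLock, Timeout

lock = FileLock("shared_resource.txt.lock")

try:
    # Wait at most 5 seconds for other processes to release the lock.
    with lock.acquire(timeout=5):
        with open("shared_resource.txt", "a") as f:
            f.write("exclusive write\n")
except Timeout:
    print("Another process is holding the lock.")
```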
venv/lib/python3.13/site-packages/filelock-3.19.1.dist-info/RECORD ADDED
@@ -0,0 +1,24 @@
+ filelock-3.19.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
2
+ filelock-3.19.1.dist-info/METADATA,sha256=gi6Y1j1mac0141sJB_Qa2MTvhwySJg2EqGjdfhBA4Og,2108
3
+ filelock-3.19.1.dist-info/RECORD,,
4
+ filelock-3.19.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
5
+ filelock-3.19.1.dist-info/licenses/LICENSE,sha256=iNm062BXnBkew5HKBMFhMFctfu3EqG2qWL8oxuFMm80,1210
6
+ filelock/__init__.py,sha256=_t_-OAGXo_qyPa9lNQ1YnzVYEvSW3I0onPqzpomsVVg,1769
7
+ filelock/__pycache__/__init__.cpython-313.pyc,,
8
+ filelock/__pycache__/_api.cpython-313.pyc,,
9
+ filelock/__pycache__/_error.cpython-313.pyc,,
10
+ filelock/__pycache__/_soft.cpython-313.pyc,,
11
+ filelock/__pycache__/_unix.cpython-313.pyc,,
12
+ filelock/__pycache__/_util.cpython-313.pyc,,
13
+ filelock/__pycache__/_windows.cpython-313.pyc,,
14
+ filelock/__pycache__/asyncio.cpython-313.pyc,,
15
+ filelock/__pycache__/version.cpython-313.pyc,,
16
+ filelock/_api.py,sha256=2aATBeJ3-jtMj5OSm7EE539iNaTBsf13KXtcBMoi8oM,14545
17
+ filelock/_error.py,sha256=-5jMcjTu60YAvAO1UbqDD1GIEjVkwr8xCFwDBtMeYDg,787
18
+ filelock/_soft.py,sha256=haqtc_TB_KJbYv2a8iuEAclKuM4fMG1vTcp28sK919c,1711
19
+ filelock/_unix.py,sha256=eGOs4gDgZ-5fGnJUz-OkJDeZkAMzgvYcD8hVD6XH7e4,2351
20
+ filelock/_util.py,sha256=QHBoNFIYfbAThhotH3Q8E2acFc84wpG49-T-uu017ZE,1715
21
+ filelock/_windows.py,sha256=8k4XIBl_zZVfGC2gz0kEr8DZBvpNa8wdU9qeM1YrBb8,2179
22
+ filelock/asyncio.py,sha256=LD9yksC24FV0mh_hzgzVi4mmOjFgVVCb7ZLbLqJcqs4,12483
23
+ filelock/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
24
+ filelock/version.py,sha256=W0fQJGqi8xqGqxm8Edh-oPZAI3aJh5WPtMYpJb1FwKQ,513
venv/lib/python3.13/site-packages/filelock-3.19.1.dist-info/WHEEL ADDED
@@ -0,0 +1,4 @@
+ Wheel-Version: 1.0
+ Generator: hatchling 1.27.0
+ Root-Is-Purelib: true
+ Tag: py3-none-any
venv/lib/python3.13/site-packages/fsspec/__init__.py ADDED
@@ -0,0 +1,71 @@
+ from . import caching
2
+ from ._version import __version__ # noqa: F401
3
+ from .callbacks import Callback
4
+ from .compression import available_compressions
5
+ from .core import get_fs_token_paths, open, open_files, open_local, url_to_fs
6
+ from .exceptions import FSTimeoutError
7
+ from .mapping import FSMap, get_mapper
8
+ from .registry import (
9
+ available_protocols,
10
+ filesystem,
11
+ get_filesystem_class,
12
+ register_implementation,
13
+ registry,
14
+ )
15
+ from .spec import AbstractFileSystem
16
+
17
+ __all__ = [
18
+ "AbstractFileSystem",
19
+ "FSTimeoutError",
20
+ "FSMap",
21
+ "filesystem",
22
+ "register_implementation",
23
+ "get_filesystem_class",
24
+ "get_fs_token_paths",
25
+ "get_mapper",
26
+ "open",
27
+ "open_files",
28
+ "open_local",
29
+ "registry",
30
+ "caching",
31
+ "Callback",
32
+ "available_protocols",
33
+ "available_compressions",
34
+ "url_to_fs",
35
+ ]
36
+
37
+
38
+ def process_entries():
39
+ try:
40
+ from importlib.metadata import entry_points
41
+ except ImportError:
42
+ return
43
+ if entry_points is not None:
44
+ try:
45
+ eps = entry_points()
46
+ except TypeError:
47
+ pass # importlib-metadata < 0.8
48
+ else:
49
+ if hasattr(eps, "select"): # Python 3.10+ / importlib_metadata >= 3.9.0
50
+ specs = eps.select(group="fsspec.specs")
51
+ else:
52
+ specs = eps.get("fsspec.specs", [])
53
+ registered_names = {}
54
+ for spec in specs:
55
+ err_msg = f"Unable to load filesystem from {spec}"
56
+ name = spec.name
57
+ if name in registered_names:
58
+ continue
59
+ registered_names[name] = True
60
+ register_implementation(
61
+ name,
62
+ spec.value.replace(":", "."),
63
+ errtxt=err_msg,
64
+ # We take our implementations as the ones to overload with if
65
+ # for some reason we encounter some, may be the same, already
66
+ # registered
67
+ clobber=True,
68
+ )
69
+
70
+
71
+ process_entries()
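For context on what this `__init__` exposes (the registry helpers plus `open` and `filesystem`), a minimal usage sketch with fsspec's built-in in-memory filesystem (illustrative only, not part of the diff):

```python
import fsspec

# Look up a filesystem class through the registry populated above and
# instantiate it; "memory" ships with fsspec, so no entry point is needed.
fs = fsspec.filesystem("memory")

with fs.open("/demo/hello.txt", "wb") as f:
    f.write(b"hello from the in-memory filesystem")

# The same registry backs URL-style access via fsspec.open().
with fsspec.open("memory://demo/hello.txt", "rb") as f:
    print(f.read())
```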
venv/lib/python3.13/site-packages/fsspec/_version.py ADDED
@@ -0,0 +1,34 @@
+ # file generated by setuptools-scm
2
+ # don't change, don't track in version control
3
+
4
+ __all__ = [
5
+ "__version__",
6
+ "__version_tuple__",
7
+ "version",
8
+ "version_tuple",
9
+ "__commit_id__",
10
+ "commit_id",
11
+ ]
12
+
13
+ TYPE_CHECKING = False
14
+ if TYPE_CHECKING:
15
+ from typing import Tuple
16
+ from typing import Union
17
+
18
+ VERSION_TUPLE = Tuple[Union[int, str], ...]
19
+ COMMIT_ID = Union[str, None]
20
+ else:
21
+ VERSION_TUPLE = object
22
+ COMMIT_ID = object
23
+
24
+ version: str
25
+ __version__: str
26
+ __version_tuple__: VERSION_TUPLE
27
+ version_tuple: VERSION_TUPLE
28
+ commit_id: COMMIT_ID
29
+ __commit_id__: COMMIT_ID
30
+
31
+ __version__ = version = '2025.9.0'
32
+ __version_tuple__ = version_tuple = (2025, 9, 0)
33
+
34
+ __commit_id__ = commit_id = None
venv/lib/python3.13/site-packages/fsspec/archive.py ADDED
@@ -0,0 +1,75 @@
+ import operator
2
+
3
+ from fsspec import AbstractFileSystem
4
+ from fsspec.utils import tokenize
5
+
6
+
7
+ class AbstractArchiveFileSystem(AbstractFileSystem):
8
+ """
9
+ A generic superclass for implementing Archive-based filesystems.
10
+
11
+ Currently, it is shared amongst
12
+ :class:`~fsspec.implementations.zip.ZipFileSystem`,
13
+ :class:`~fsspec.implementations.libarchive.LibArchiveFileSystem` and
14
+ :class:`~fsspec.implementations.tar.TarFileSystem`.
15
+ """
16
+
17
+ def __str__(self):
18
+ return f"<Archive-like object {type(self).__name__} at {id(self)}>"
19
+
20
+ __repr__ = __str__
21
+
22
+ def ukey(self, path):
23
+ return tokenize(path, self.fo, self.protocol)
24
+
25
+ def _all_dirnames(self, paths):
26
+ """Returns *all* directory names for each path in paths, including intermediate
27
+ ones.
28
+
29
+ Parameters
30
+ ----------
31
+ paths: Iterable of path strings
32
+ """
33
+ if len(paths) == 0:
34
+ return set()
35
+
36
+ dirnames = {self._parent(path) for path in paths} - {self.root_marker}
37
+ return dirnames | self._all_dirnames(dirnames)
38
+
39
+ def info(self, path, **kwargs):
40
+ self._get_dirs()
41
+ path = self._strip_protocol(path)
42
+ if path in {"", "/"} and self.dir_cache:
43
+ return {"name": "", "type": "directory", "size": 0}
44
+ if path in self.dir_cache:
45
+ return self.dir_cache[path]
46
+ elif path + "/" in self.dir_cache:
47
+ return self.dir_cache[path + "/"]
48
+ else:
49
+ raise FileNotFoundError(path)
50
+
51
+ def ls(self, path, detail=True, **kwargs):
52
+ self._get_dirs()
53
+ paths = {}
54
+ for p, f in self.dir_cache.items():
55
+ p = p.rstrip("/")
56
+ if "/" in p:
57
+ root = p.rsplit("/", 1)[0]
58
+ else:
59
+ root = ""
60
+ if root == path.rstrip("/"):
61
+ paths[p] = f
62
+ elif all(
63
+ (a == b)
64
+ for a, b in zip(path.split("/"), [""] + p.strip("/").split("/"))
65
+ ):
66
+ # root directory entry
67
+ ppath = p.rstrip("/").split("/", 1)[0]
68
+ if ppath not in paths:
69
+ out = {"name": ppath, "size": 0, "type": "directory"}
70
+ paths[ppath] = out
71
+ if detail:
72
+ out = sorted(paths.values(), key=operator.itemgetter("name"))
73
+ return out
74
+ else:
75
+ return sorted(paths)
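
AbstractArchiveFileSystem only supplies the shared listing/ukey logic; concrete behaviour lives in the zip, tar and libarchive subclasses it names. A hedged usage sketch, assuming a local "example.zip" archive exists:

import fsspec

# ZipFileSystem inherits the info()/ls() machinery defined above
fs = fsspec.filesystem("zip", fo="example.zip")
print(fs.ls("/"))            # top-level members, including synthesised directories
first = fs.find("/")[0]      # all file paths in the archive, sorted
with fs.open(first, "rb") as f:
    print(f.read(32))        # first bytes of that member
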
venv/lib/python3.13/site-packages/fsspec/asyn.py ADDED
@@ -0,0 +1,1097 @@
1
+ import asyncio
2
+ import asyncio.events
3
+ import functools
4
+ import inspect
5
+ import io
6
+ import numbers
7
+ import os
8
+ import re
9
+ import threading
10
+ from collections.abc import Iterable
11
+ from glob import has_magic
12
+ from typing import TYPE_CHECKING
13
+
14
+ from .callbacks import DEFAULT_CALLBACK
15
+ from .exceptions import FSTimeoutError
16
+ from .implementations.local import LocalFileSystem, make_path_posix, trailing_sep
17
+ from .spec import AbstractBufferedFile, AbstractFileSystem
18
+ from .utils import glob_translate, is_exception, other_paths
19
+
20
+ private = re.compile("_[^_]")
21
+ iothread = [None] # dedicated fsspec IO thread
22
+ loop = [None] # global event loop for any non-async instance
23
+ _lock = None # global lock placeholder
24
+ get_running_loop = asyncio.get_running_loop
25
+
26
+
27
+ def get_lock():
28
+ """Allocate or return a threading lock.
29
+
30
+ The lock is allocated on first use to allow setting one lock per forked process.
31
+ """
32
+ global _lock
33
+ if not _lock:
34
+ _lock = threading.Lock()
35
+ return _lock
36
+
37
+
38
+ def reset_lock():
39
+ """Reset the global lock.
40
+
41
+ This should be called only on the init of a forked process to reset the lock to
42
+ None, enabling the new forked process to get a new lock.
43
+ """
44
+ global _lock
45
+
46
+ iothread[0] = None
47
+ loop[0] = None
48
+ _lock = None
49
+
50
+
51
+ async def _runner(event, coro, result, timeout=None):
52
+ timeout = timeout if timeout else None # convert 0 or 0.0 to None
53
+ if timeout is not None:
54
+ coro = asyncio.wait_for(coro, timeout=timeout)
55
+ try:
56
+ result[0] = await coro
57
+ except Exception as ex:
58
+ result[0] = ex
59
+ finally:
60
+ event.set()
61
+
62
+
63
+ def sync(loop, func, *args, timeout=None, **kwargs):
64
+ """
65
+ Make the loop run the coroutine until it returns. Runs in another thread.
66
+
67
+ Examples
68
+ --------
69
+ >>> fsspec.asyn.sync(fsspec.asyn.get_loop(), func, *args,
70
+ timeout=timeout, **kwargs)
71
+ """
72
+ timeout = timeout if timeout else None # convert 0 or 0.0 to None
73
+ # NB: if the loop is not running *yet*, it is OK to submit work
74
+ # and we will wait for it
75
+ if loop is None or loop.is_closed():
76
+ raise RuntimeError("Loop is not running")
77
+ try:
78
+ loop0 = asyncio.events.get_running_loop()
79
+ if loop0 is loop:
80
+ raise NotImplementedError("Calling sync() from within a running loop")
81
+ except NotImplementedError:
82
+ raise
83
+ except RuntimeError:
84
+ pass
85
+ coro = func(*args, **kwargs)
86
+ result = [None]
87
+ event = threading.Event()
88
+ asyncio.run_coroutine_threadsafe(_runner(event, coro, result, timeout), loop)
89
+ while True:
90
+ # this loop allows the thread to be interrupted
91
+ if event.wait(1):
92
+ break
93
+ if timeout is not None:
94
+ timeout -= 1
95
+ if timeout < 0:
96
+ raise FSTimeoutError
97
+
98
+ return_result = result[0]
99
+ if isinstance(return_result, asyncio.TimeoutError):
100
+ # suppress asyncio.TimeoutError, raise FSTimeoutError
101
+ raise FSTimeoutError from return_result
102
+ elif isinstance(return_result, BaseException):
103
+ raise return_result
104
+ else:
105
+ return return_result
106
+
107
+
108
+ def sync_wrapper(func, obj=None):
109
+ """Given a function, make it so it can be called in blocking contexts
110
+
111
+ Leave obj=None if defining within a class. Pass the instance if attaching
112
+ as an attribute of the instance.
113
+ """
114
+
115
+ @functools.wraps(func)
116
+ def wrapper(*args, **kwargs):
117
+ self = obj or args[0]
118
+ return sync(self.loop, func, *args, **kwargs)
119
+
120
+ return wrapper
121
+
122
+
123
+ def get_loop():
124
+ """Create or return the default fsspec IO loop
125
+
126
+ The loop will be running on a separate thread.
127
+ """
128
+ if loop[0] is None:
129
+ with get_lock():
130
+ # repeat the check just in case the loop got filled between the
131
+ # previous two calls from another thread
132
+ if loop[0] is None:
133
+ loop[0] = asyncio.new_event_loop()
134
+ th = threading.Thread(target=loop[0].run_forever, name="fsspecIO")
135
+ th.daemon = True
136
+ th.start()
137
+ iothread[0] = th
138
+ return loop[0]
139
+
140
+
141
+ def reset_after_fork():
142
+ global _lock
143
+ loop[0] = None
144
+ iothread[0] = None
145
+ _lock = None
146
+
147
+
148
+ if hasattr(os, "register_at_fork"):
149
+ # should be posix; this will do nothing for spawn or forkserver subprocesses
150
+ os.register_at_fork(after_in_child=reset_after_fork)
151
+
152
+
153
+ if TYPE_CHECKING:
154
+ import resource
155
+
156
+ ResourceError = resource.error
157
+ else:
158
+ try:
159
+ import resource
160
+ except ImportError:
161
+ resource = None
162
+ ResourceError = OSError
163
+ else:
164
+ ResourceError = getattr(resource, "error", OSError)
165
+
166
+ _DEFAULT_BATCH_SIZE = 128
167
+ _NOFILES_DEFAULT_BATCH_SIZE = 1280
168
+
169
+
170
+ def _get_batch_size(nofiles=False):
171
+ from fsspec.config import conf
172
+
173
+ if nofiles:
174
+ if "nofiles_gather_batch_size" in conf:
175
+ return conf["nofiles_gather_batch_size"]
176
+ else:
177
+ if "gather_batch_size" in conf:
178
+ return conf["gather_batch_size"]
179
+ if nofiles:
180
+ return _NOFILES_DEFAULT_BATCH_SIZE
181
+ if resource is None:
182
+ return _DEFAULT_BATCH_SIZE
183
+
184
+ try:
185
+ soft_limit, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
186
+ except (ImportError, ValueError, ResourceError):
187
+ return _DEFAULT_BATCH_SIZE
188
+
189
+ if soft_limit == resource.RLIM_INFINITY:
190
+ return -1
191
+ else:
192
+ return soft_limit // 8
193
+
194
+
195
+ def running_async() -> bool:
196
+ """Being executed by an event loop?"""
197
+ try:
198
+ asyncio.get_running_loop()
199
+ return True
200
+ except RuntimeError:
201
+ return False
202
+
203
+
204
+ async def _run_coros_in_chunks(
205
+ coros,
206
+ batch_size=None,
207
+ callback=DEFAULT_CALLBACK,
208
+ timeout=None,
209
+ return_exceptions=False,
210
+ nofiles=False,
211
+ ):
212
+ """Run the given coroutines in chunks.
213
+
214
+ Parameters
215
+ ----------
216
+ coros: list of coroutines to run
217
+ batch_size: int or None
218
+ Number of coroutines to submit/wait on simultaneously.
219
+ If -1, then there will not be any throttling. If
220
+ None, it will be inferred from _get_batch_size()
221
+ callback: fsspec.callbacks.Callback instance
222
+ Gets a relative_update when each coroutine completes
223
+ timeout: number or None
224
+ If given, each coroutine times out after this time. Note that, since
225
+ there are multiple batches, the total run time of this function will in
226
+ general be longer
227
+ return_exceptions: bool
228
+ Same meaning as in asyncio.gather
229
+ nofiles: bool
230
+ If inferring the batch_size, does this operation involve local files?
231
+ If yes, you normally expect smaller batches.
232
+ """
233
+
234
+ if batch_size is None:
235
+ batch_size = _get_batch_size(nofiles=nofiles)
236
+
237
+ if batch_size == -1:
238
+ batch_size = len(coros)
239
+
240
+ assert batch_size > 0
241
+
242
+ async def _run_coro(coro, i):
243
+ try:
244
+ return await asyncio.wait_for(coro, timeout=timeout), i
245
+ except Exception as e:
246
+ if not return_exceptions:
247
+ raise
248
+ return e, i
249
+ finally:
250
+ callback.relative_update(1)
251
+
252
+ i = 0
253
+ n = len(coros)
254
+ results = [None] * n
255
+ pending = set()
256
+
257
+ while pending or i < n:
258
+ while len(pending) < batch_size and i < n:
259
+ pending.add(asyncio.ensure_future(_run_coro(coros[i], i)))
260
+ i += 1
261
+
262
+ if not pending:
263
+ break
264
+
265
+ done, pending = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED)
266
+ while done:
267
+ result, k = await done.pop()
268
+ results[k] = result
269
+
270
+ return results
271
+
272
+
273
+ # these methods should be implemented as async by any async-able backend
274
+ async_methods = [
275
+ "_ls",
276
+ "_cat_file",
277
+ "_get_file",
278
+ "_put_file",
279
+ "_rm_file",
280
+ "_cp_file",
281
+ "_pipe_file",
282
+ "_expand_path",
283
+ "_info",
284
+ "_isfile",
285
+ "_isdir",
286
+ "_exists",
287
+ "_walk",
288
+ "_glob",
289
+ "_find",
290
+ "_du",
291
+ "_size",
292
+ "_mkdir",
293
+ "_makedirs",
294
+ ]
295
+
296
+
297
+ class AsyncFileSystem(AbstractFileSystem):
298
+ """Async file operations, default implementations
299
+
300
+ Passes bulk operations to asyncio.gather for concurrent operation.
301
+
302
+ Implementations that have concurrent batch operations and/or async methods
303
+ should inherit from this class instead of AbstractFileSystem. Docstrings are
304
+ copied from the un-underscored method in AbstractFileSystem, if not given.
305
+ """
306
+
307
+ # note that methods do not have docstring here; they will be copied
308
+ # for _* methods and inferred for overridden methods.
309
+
310
+ async_impl = True
311
+ mirror_sync_methods = True
312
+ disable_throttling = False
313
+
314
+ def __init__(self, *args, asynchronous=False, loop=None, batch_size=None, **kwargs):
315
+ self.asynchronous = asynchronous
316
+ self._pid = os.getpid()
317
+ if not asynchronous:
318
+ self._loop = loop or get_loop()
319
+ else:
320
+ self._loop = None
321
+ self.batch_size = batch_size
322
+ super().__init__(*args, **kwargs)
323
+
324
+ @property
325
+ def loop(self):
326
+ if self._pid != os.getpid():
327
+ raise RuntimeError("This class is not fork-safe")
328
+ return self._loop
329
+
330
+ async def _rm_file(self, path, **kwargs):
331
+ raise NotImplementedError
332
+
333
+ async def _rm(self, path, recursive=False, batch_size=None, **kwargs):
334
+ # TODO: implement on_error
335
+ batch_size = batch_size or self.batch_size
336
+ path = await self._expand_path(path, recursive=recursive)
337
+ return await _run_coros_in_chunks(
338
+ [self._rm_file(p, **kwargs) for p in reversed(path)],
339
+ batch_size=batch_size,
340
+ nofiles=True,
341
+ )
342
+
343
+ async def _cp_file(self, path1, path2, **kwargs):
344
+ raise NotImplementedError
345
+
346
+ async def _mv_file(self, path1, path2):
347
+ await self._cp_file(path1, path2)
348
+ await self._rm_file(path1)
349
+
350
+ async def _copy(
351
+ self,
352
+ path1,
353
+ path2,
354
+ recursive=False,
355
+ on_error=None,
356
+ maxdepth=None,
357
+ batch_size=None,
358
+ **kwargs,
359
+ ):
360
+ if on_error is None and recursive:
361
+ on_error = "ignore"
362
+ elif on_error is None:
363
+ on_error = "raise"
364
+
365
+ if isinstance(path1, list) and isinstance(path2, list):
366
+ # No need to expand paths when both source and destination
367
+ # are provided as lists
368
+ paths1 = path1
369
+ paths2 = path2
370
+ else:
371
+ source_is_str = isinstance(path1, str)
372
+ paths1 = await self._expand_path(
373
+ path1, maxdepth=maxdepth, recursive=recursive
374
+ )
375
+ if source_is_str and (not recursive or maxdepth is not None):
376
+ # Non-recursive glob does not copy directories
377
+ paths1 = [
378
+ p for p in paths1 if not (trailing_sep(p) or await self._isdir(p))
379
+ ]
380
+ if not paths1:
381
+ return
382
+
383
+ source_is_file = len(paths1) == 1
384
+ dest_is_dir = isinstance(path2, str) and (
385
+ trailing_sep(path2) or await self._isdir(path2)
386
+ )
387
+
388
+ exists = source_is_str and (
389
+ (has_magic(path1) and source_is_file)
390
+ or (not has_magic(path1) and dest_is_dir and not trailing_sep(path1))
391
+ )
392
+ paths2 = other_paths(
393
+ paths1,
394
+ path2,
395
+ exists=exists,
396
+ flatten=not source_is_str,
397
+ )
398
+
399
+ batch_size = batch_size or self.batch_size
400
+ coros = [self._cp_file(p1, p2, **kwargs) for p1, p2 in zip(paths1, paths2)]
401
+ result = await _run_coros_in_chunks(
402
+ coros, batch_size=batch_size, return_exceptions=True, nofiles=True
403
+ )
404
+
405
+ for ex in filter(is_exception, result):
406
+ if on_error == "ignore" and isinstance(ex, FileNotFoundError):
407
+ continue
408
+ raise ex
409
+
410
+ async def _pipe_file(self, path, value, mode="overwrite", **kwargs):
411
+ raise NotImplementedError
412
+
413
+ async def _pipe(self, path, value=None, batch_size=None, **kwargs):
414
+ if isinstance(path, str):
415
+ path = {path: value}
416
+ batch_size = batch_size or self.batch_size
417
+ return await _run_coros_in_chunks(
418
+ [self._pipe_file(k, v, **kwargs) for k, v in path.items()],
419
+ batch_size=batch_size,
420
+ nofiles=True,
421
+ )
422
+
423
+ async def _process_limits(self, url, start, end):
424
+ """Helper for "Range"-based _cat_file"""
425
+ size = None
426
+ suff = False
427
+ if start is not None and start < 0:
428
+ # if start is negative and end None, end is the "suffix length"
429
+ if end is None:
430
+ end = -start
431
+ start = ""
432
+ suff = True
433
+ else:
434
+ size = size or (await self._info(url))["size"]
435
+ start = size + start
436
+ elif start is None:
437
+ start = 0
438
+ if not suff:
439
+ if end is not None and end < 0:
440
+ if start is not None:
441
+ size = size or (await self._info(url))["size"]
442
+ end = size + end
443
+ elif end is None:
444
+ end = ""
445
+ if isinstance(end, numbers.Integral):
446
+ end -= 1 # bytes range is inclusive
447
+ return f"bytes={start}-{end}"
448
+
449
+ async def _cat_file(self, path, start=None, end=None, **kwargs):
450
+ raise NotImplementedError
451
+
452
+ async def _cat(
453
+ self, path, recursive=False, on_error="raise", batch_size=None, **kwargs
454
+ ):
455
+ paths = await self._expand_path(path, recursive=recursive)
456
+ coros = [self._cat_file(path, **kwargs) for path in paths]
457
+ batch_size = batch_size or self.batch_size
458
+ out = await _run_coros_in_chunks(
459
+ coros, batch_size=batch_size, nofiles=True, return_exceptions=True
460
+ )
461
+ if on_error == "raise":
462
+ ex = next(filter(is_exception, out), False)
463
+ if ex:
464
+ raise ex
465
+ if (
466
+ len(paths) > 1
467
+ or isinstance(path, list)
468
+ or paths[0] != self._strip_protocol(path)
469
+ ):
470
+ return {
471
+ k: v
472
+ for k, v in zip(paths, out)
473
+ if on_error != "omit" or not is_exception(v)
474
+ }
475
+ else:
476
+ return out[0]
477
+
478
+ async def _cat_ranges(
479
+ self,
480
+ paths,
481
+ starts,
482
+ ends,
483
+ max_gap=None,
484
+ batch_size=None,
485
+ on_error="return",
486
+ **kwargs,
487
+ ):
488
+ """Get the contents of byte ranges from one or more files
489
+
490
+ Parameters
491
+ ----------
492
+ paths: list
493
+ A list of filepaths on this filesystem
494
+ starts, ends: int or list
495
+ Bytes limits of the read. If using a single int, the same value will be
496
+ used to read all the specified files.
497
+ """
498
+ # TODO: on_error
499
+ if max_gap is not None:
500
+ # use utils.merge_offset_ranges
501
+ raise NotImplementedError
502
+ if not isinstance(paths, list):
503
+ raise TypeError
504
+ if not isinstance(starts, Iterable):
505
+ starts = [starts] * len(paths)
506
+ if not isinstance(ends, Iterable):
507
+ ends = [ends] * len(paths)
508
+ if len(starts) != len(paths) or len(ends) != len(paths):
509
+ raise ValueError
510
+ coros = [
511
+ self._cat_file(p, start=s, end=e, **kwargs)
512
+ for p, s, e in zip(paths, starts, ends)
513
+ ]
514
+ batch_size = batch_size or self.batch_size
515
+ return await _run_coros_in_chunks(
516
+ coros, batch_size=batch_size, nofiles=True, return_exceptions=True
517
+ )
518
+
519
+ async def _put_file(self, lpath, rpath, mode="overwrite", **kwargs):
520
+ raise NotImplementedError
521
+
522
+ async def _put(
523
+ self,
524
+ lpath,
525
+ rpath,
526
+ recursive=False,
527
+ callback=DEFAULT_CALLBACK,
528
+ batch_size=None,
529
+ maxdepth=None,
530
+ **kwargs,
531
+ ):
532
+ """Copy file(s) from local.
533
+
534
+ Copies a specific file or tree of files (if recursive=True). If rpath
535
+ ends with a "/", it will be assumed to be a directory, and target files
536
+ will go within.
537
+
538
+ The put_file method will be called concurrently on a batch of files. The
539
+ batch_size option can configure the amount of futures that can be executed
540
+ at the same time. If it is -1, then all the files will be uploaded concurrently.
541
+ The default can be set for this instance by passing "batch_size" in the
542
+ constructor, or for all instances by setting the "gather_batch_size" key
543
+ in ``fsspec.config.conf``, falling back to 1/8th of the system limit.
544
+ """
545
+ if isinstance(lpath, list) and isinstance(rpath, list):
546
+ # No need to expand paths when both source and destination
547
+ # are provided as lists
548
+ rpaths = rpath
549
+ lpaths = lpath
550
+ else:
551
+ source_is_str = isinstance(lpath, str)
552
+ if source_is_str:
553
+ lpath = make_path_posix(lpath)
554
+ fs = LocalFileSystem()
555
+ lpaths = fs.expand_path(lpath, recursive=recursive, maxdepth=maxdepth)
556
+ if source_is_str and (not recursive or maxdepth is not None):
557
+ # Non-recursive glob does not copy directories
558
+ lpaths = [p for p in lpaths if not (trailing_sep(p) or fs.isdir(p))]
559
+ if not lpaths:
560
+ return
561
+
562
+ source_is_file = len(lpaths) == 1
563
+ dest_is_dir = isinstance(rpath, str) and (
564
+ trailing_sep(rpath) or await self._isdir(rpath)
565
+ )
566
+
567
+ rpath = self._strip_protocol(rpath)
568
+ exists = source_is_str and (
569
+ (has_magic(lpath) and source_is_file)
570
+ or (not has_magic(lpath) and dest_is_dir and not trailing_sep(lpath))
571
+ )
572
+ rpaths = other_paths(
573
+ lpaths,
574
+ rpath,
575
+ exists=exists,
576
+ flatten=not source_is_str,
577
+ )
578
+
579
+ is_dir = {l: os.path.isdir(l) for l in lpaths}
580
+ rdirs = [r for l, r in zip(lpaths, rpaths) if is_dir[l]]
581
+ file_pairs = [(l, r) for l, r in zip(lpaths, rpaths) if not is_dir[l]]
582
+
583
+ await asyncio.gather(*[self._makedirs(d, exist_ok=True) for d in rdirs])
584
+ batch_size = batch_size or self.batch_size
585
+
586
+ coros = []
587
+ callback.set_size(len(file_pairs))
588
+ for lfile, rfile in file_pairs:
589
+ put_file = callback.branch_coro(self._put_file)
590
+ coros.append(put_file(lfile, rfile, **kwargs))
591
+
592
+ return await _run_coros_in_chunks(
593
+ coros, batch_size=batch_size, callback=callback
594
+ )
595
+
596
+ async def _get_file(self, rpath, lpath, **kwargs):
597
+ raise NotImplementedError
598
+
599
+ async def _get(
600
+ self,
601
+ rpath,
602
+ lpath,
603
+ recursive=False,
604
+ callback=DEFAULT_CALLBACK,
605
+ maxdepth=None,
606
+ **kwargs,
607
+ ):
608
+ """Copy file(s) to local.
609
+
610
+ Copies a specific file or tree of files (if recursive=True). If lpath
611
+ ends with a "/", it will be assumed to be a directory, and target files
612
+ will go within. Can submit a list of paths, which may be glob-patterns
613
+ and will be expanded.
614
+
615
+ The get_file method will be called concurrently on a batch of files. The
616
+ batch_size option can configure the amount of futures that can be executed
617
+ at the same time. If it is -1, then all the files will be downloaded concurrently.
618
+ The default can be set for this instance by passing "batch_size" in the
619
+ constructor, or for all instances by setting the "gather_batch_size" key
620
+ in ``fsspec.config.conf``, falling back to 1/8th of the system limit.
621
+ """
622
+ if isinstance(lpath, list) and isinstance(rpath, list):
623
+ # No need to expand paths when both source and destination
624
+ # are provided as lists
625
+ rpaths = rpath
626
+ lpaths = lpath
627
+ else:
628
+ source_is_str = isinstance(rpath, str)
629
+ # First check for rpath trailing slash as _strip_protocol removes it.
630
+ source_not_trailing_sep = source_is_str and not trailing_sep(rpath)
631
+ rpath = self._strip_protocol(rpath)
632
+ rpaths = await self._expand_path(
633
+ rpath, recursive=recursive, maxdepth=maxdepth
634
+ )
635
+ if source_is_str and (not recursive or maxdepth is not None):
636
+ # Non-recursive glob does not copy directories
637
+ rpaths = [
638
+ p for p in rpaths if not (trailing_sep(p) or await self._isdir(p))
639
+ ]
640
+ if not rpaths:
641
+ return
642
+
643
+ lpath = make_path_posix(lpath)
644
+ source_is_file = len(rpaths) == 1
645
+ dest_is_dir = isinstance(lpath, str) and (
646
+ trailing_sep(lpath) or LocalFileSystem().isdir(lpath)
647
+ )
648
+
649
+ exists = source_is_str and (
650
+ (has_magic(rpath) and source_is_file)
651
+ or (not has_magic(rpath) and dest_is_dir and source_not_trailing_sep)
652
+ )
653
+ lpaths = other_paths(
654
+ rpaths,
655
+ lpath,
656
+ exists=exists,
657
+ flatten=not source_is_str,
658
+ )
659
+
660
+ [os.makedirs(os.path.dirname(lp), exist_ok=True) for lp in lpaths]
661
+ batch_size = kwargs.pop("batch_size", self.batch_size)
662
+
663
+ coros = []
664
+ callback.set_size(len(lpaths))
665
+ for lpath, rpath in zip(lpaths, rpaths):
666
+ get_file = callback.branch_coro(self._get_file)
667
+ coros.append(get_file(rpath, lpath, **kwargs))
668
+ return await _run_coros_in_chunks(
669
+ coros, batch_size=batch_size, callback=callback
670
+ )
671
+
672
+ async def _isfile(self, path):
673
+ try:
674
+ return (await self._info(path))["type"] == "file"
675
+ except: # noqa: E722
676
+ return False
677
+
678
+ async def _isdir(self, path):
679
+ try:
680
+ return (await self._info(path))["type"] == "directory"
681
+ except OSError:
682
+ return False
683
+
684
+ async def _size(self, path):
685
+ return (await self._info(path)).get("size", None)
686
+
687
+ async def _sizes(self, paths, batch_size=None):
688
+ batch_size = batch_size or self.batch_size
689
+ return await _run_coros_in_chunks(
690
+ [self._size(p) for p in paths], batch_size=batch_size
691
+ )
692
+
693
+ async def _exists(self, path, **kwargs):
694
+ try:
695
+ await self._info(path, **kwargs)
696
+ return True
697
+ except FileNotFoundError:
698
+ return False
699
+
700
+ async def _info(self, path, **kwargs):
701
+ raise NotImplementedError
702
+
703
+ async def _ls(self, path, detail=True, **kwargs):
704
+ raise NotImplementedError
705
+
706
+ async def _walk(self, path, maxdepth=None, on_error="omit", **kwargs):
707
+ if maxdepth is not None and maxdepth < 1:
708
+ raise ValueError("maxdepth must be at least 1")
709
+
710
+ path = self._strip_protocol(path)
711
+ full_dirs = {}
712
+ dirs = {}
713
+ files = {}
714
+
715
+ detail = kwargs.pop("detail", False)
716
+ try:
717
+ listing = await self._ls(path, detail=True, **kwargs)
718
+ except (FileNotFoundError, OSError) as e:
719
+ if on_error == "raise":
720
+ raise
721
+ elif callable(on_error):
722
+ on_error(e)
723
+ if detail:
724
+ yield path, {}, {}
725
+ else:
726
+ yield path, [], []
727
+ return
728
+
729
+ for info in listing:
730
+ # each info name must be at least [path]/part , but here
731
+ # we check also for names like [path]/part/
732
+ pathname = info["name"].rstrip("/")
733
+ name = pathname.rsplit("/", 1)[-1]
734
+ if info["type"] == "directory" and pathname != path:
735
+ # do not include "self" path
736
+ full_dirs[name] = pathname
737
+ dirs[name] = info
738
+ elif pathname == path:
739
+ # file-like with same name as given path
740
+ files[""] = info
741
+ else:
742
+ files[name] = info
743
+
744
+ if detail:
745
+ yield path, dirs, files
746
+ else:
747
+ yield path, list(dirs), list(files)
748
+
749
+ if maxdepth is not None:
750
+ maxdepth -= 1
751
+ if maxdepth < 1:
752
+ return
753
+
754
+ for d in dirs:
755
+ async for _ in self._walk(
756
+ full_dirs[d], maxdepth=maxdepth, detail=detail, **kwargs
757
+ ):
758
+ yield _
759
+
760
+ async def _glob(self, path, maxdepth=None, **kwargs):
761
+ if maxdepth is not None and maxdepth < 1:
762
+ raise ValueError("maxdepth must be at least 1")
763
+
764
+ import re
765
+
766
+ seps = (os.path.sep, os.path.altsep) if os.path.altsep else (os.path.sep,)
767
+ ends_with_sep = path.endswith(seps) # _strip_protocol strips trailing slash
768
+ path = self._strip_protocol(path)
769
+ append_slash_to_dirname = ends_with_sep or path.endswith(
770
+ tuple(sep + "**" for sep in seps)
771
+ )
772
+ idx_star = path.find("*") if path.find("*") >= 0 else len(path)
773
+ idx_qmark = path.find("?") if path.find("?") >= 0 else len(path)
774
+ idx_brace = path.find("[") if path.find("[") >= 0 else len(path)
775
+
776
+ min_idx = min(idx_star, idx_qmark, idx_brace)
777
+
778
+ detail = kwargs.pop("detail", False)
779
+
780
+ if not has_magic(path):
781
+ if await self._exists(path, **kwargs):
782
+ if not detail:
783
+ return [path]
784
+ else:
785
+ return {path: await self._info(path, **kwargs)}
786
+ else:
787
+ if not detail:
788
+ return [] # glob of non-existent returns empty
789
+ else:
790
+ return {}
791
+ elif "/" in path[:min_idx]:
792
+ min_idx = path[:min_idx].rindex("/")
793
+ root = path[: min_idx + 1]
794
+ depth = path[min_idx + 1 :].count("/") + 1
795
+ else:
796
+ root = ""
797
+ depth = path[min_idx + 1 :].count("/") + 1
798
+
799
+ if "**" in path:
800
+ if maxdepth is not None:
801
+ idx_double_stars = path.find("**")
802
+ depth_double_stars = path[idx_double_stars:].count("/") + 1
803
+ depth = depth - depth_double_stars + maxdepth
804
+ else:
805
+ depth = None
806
+
807
+ allpaths = await self._find(
808
+ root, maxdepth=depth, withdirs=True, detail=True, **kwargs
809
+ )
810
+
811
+ pattern = glob_translate(path + ("/" if ends_with_sep else ""))
812
+ pattern = re.compile(pattern)
813
+
814
+ out = {
815
+ p: info
816
+ for p, info in sorted(allpaths.items())
817
+ if pattern.match(
818
+ p + "/"
819
+ if append_slash_to_dirname and info["type"] == "directory"
820
+ else p
821
+ )
822
+ }
823
+
824
+ if detail:
825
+ return out
826
+ else:
827
+ return list(out)
828
+
829
+ async def _du(self, path, total=True, maxdepth=None, **kwargs):
830
+ sizes = {}
831
+ # async for?
832
+ for f in await self._find(path, maxdepth=maxdepth, **kwargs):
833
+ info = await self._info(f)
834
+ sizes[info["name"]] = info["size"]
835
+ if total:
836
+ return sum(sizes.values())
837
+ else:
838
+ return sizes
839
+
840
+ async def _find(self, path, maxdepth=None, withdirs=False, **kwargs):
841
+ path = self._strip_protocol(path)
842
+ out = {}
843
+ detail = kwargs.pop("detail", False)
844
+
845
+ # Add the root directory if withdirs is requested
846
+ # This is needed for posix glob compliance
847
+ if withdirs and path != "" and await self._isdir(path):
848
+ out[path] = await self._info(path)
849
+
850
+ # async for?
851
+ async for _, dirs, files in self._walk(path, maxdepth, detail=True, **kwargs):
852
+ if withdirs:
853
+ files.update(dirs)
854
+ out.update({info["name"]: info for name, info in files.items()})
855
+ if not out and (await self._isfile(path)):
856
+ # walk works on directories, but find should also return [path]
857
+ # when path happens to be a file
858
+ out[path] = {}
859
+ names = sorted(out)
860
+ if not detail:
861
+ return names
862
+ else:
863
+ return {name: out[name] for name in names}
864
+
865
+ async def _expand_path(self, path, recursive=False, maxdepth=None):
866
+ if maxdepth is not None and maxdepth < 1:
867
+ raise ValueError("maxdepth must be at least 1")
868
+
869
+ if isinstance(path, str):
870
+ out = await self._expand_path([path], recursive, maxdepth)
871
+ else:
872
+ out = set()
873
+ path = [self._strip_protocol(p) for p in path]
874
+ for p in path: # can gather here
875
+ if has_magic(p):
876
+ bit = set(await self._glob(p, maxdepth=maxdepth))
877
+ out |= bit
878
+ if recursive:
879
+ # glob call above expanded one depth so if maxdepth is defined
880
+ # then decrement it in expand_path call below. If it is zero
881
+ # after decrementing then avoid expand_path call.
882
+ if maxdepth is not None and maxdepth <= 1:
883
+ continue
884
+ out |= set(
885
+ await self._expand_path(
886
+ list(bit),
887
+ recursive=recursive,
888
+ maxdepth=maxdepth - 1 if maxdepth is not None else None,
889
+ )
890
+ )
891
+ continue
892
+ elif recursive:
893
+ rec = set(await self._find(p, maxdepth=maxdepth, withdirs=True))
894
+ out |= rec
895
+ if p not in out and (recursive is False or (await self._exists(p))):
896
+ # should only check once, for the root
897
+ out.add(p)
898
+ if not out:
899
+ raise FileNotFoundError(path)
900
+ return sorted(out)
901
+
902
+ async def _mkdir(self, path, create_parents=True, **kwargs):
903
+ pass # not necessary to implement, may not have directories
904
+
905
+ async def _makedirs(self, path, exist_ok=False):
906
+ pass # not necessary to implement, may not have directories
907
+
908
+ async def open_async(self, path, mode="rb", **kwargs):
909
+ if "b" not in mode or kwargs.get("compression"):
910
+ raise ValueError
911
+ raise NotImplementedError
912
+
913
+
914
+ def mirror_sync_methods(obj):
915
+ """Populate sync and async methods for obj
916
+
917
+ For each method, this will create a sync version if the name refers to an async method
918
+ (coroutine) and there is no override in the child class; will create an async
919
+ method for the corresponding sync method if there is no implementation.
920
+
921
+ Uses the methods specified in
922
+ - async_methods: the set that an implementation is expected to provide
923
+ - default_async_methods: that can be derived from their sync version in
924
+ AbstractFileSystem
925
+ - AsyncFileSystem: async-specific default coroutines
926
+ """
927
+ from fsspec import AbstractFileSystem
928
+
929
+ for method in async_methods + dir(AsyncFileSystem):
930
+ if not method.startswith("_"):
931
+ continue
932
+ smethod = method[1:]
933
+ if private.match(method):
934
+ isco = inspect.iscoroutinefunction(getattr(obj, method, None))
935
+ unsync = getattr(getattr(obj, smethod, False), "__func__", None)
936
+ is_default = unsync is getattr(AbstractFileSystem, smethod, "")
937
+ if isco and is_default:
938
+ mth = sync_wrapper(getattr(obj, method), obj=obj)
939
+ setattr(obj, smethod, mth)
940
+ if not mth.__doc__:
941
+ mth.__doc__ = getattr(
942
+ getattr(AbstractFileSystem, smethod, None), "__doc__", ""
943
+ )
944
+
945
+
946
+ class FSSpecCoroutineCancel(Exception):
947
+ pass
948
+
949
+
950
+ def _dump_running_tasks(
951
+ printout=True, cancel=True, exc=FSSpecCoroutineCancel, with_task=False
952
+ ):
953
+ import traceback
954
+
955
+ tasks = [t for t in asyncio.tasks.all_tasks(loop[0]) if not t.done()]
956
+ if printout:
957
+ [task.print_stack() for task in tasks]
958
+ out = [
959
+ {
960
+ "locals": task._coro.cr_frame.f_locals,
961
+ "file": task._coro.cr_frame.f_code.co_filename,
962
+ "firstline": task._coro.cr_frame.f_code.co_firstlineno,
963
+ "linelo": task._coro.cr_frame.f_lineno,
964
+ "stack": traceback.format_stack(task._coro.cr_frame),
965
+ "task": task if with_task else None,
966
+ }
967
+ for task in tasks
968
+ ]
969
+ if cancel:
970
+ for t in tasks:
971
+ cbs = t._callbacks
972
+ t.cancel()
973
+ asyncio.futures.Future.set_exception(t, exc)
974
+ asyncio.futures.Future.cancel(t)
975
+ [cb[0](t) for cb in cbs] # cancels any dependent concurrent.futures
976
+ try:
977
+ t._coro.throw(exc) # exits coro, unless explicitly handled
978
+ except exc:
979
+ pass
980
+ return out
981
+
982
+
983
+ class AbstractAsyncStreamedFile(AbstractBufferedFile):
984
+ # no read buffering, and always auto-commit
985
+ # TODO: readahead might still be useful here, but needs async version
986
+
987
+ async def read(self, length=-1):
988
+ """
989
+ Return data from cache, or fetch pieces as necessary
990
+
991
+ Parameters
992
+ ----------
993
+ length: int (-1)
994
+ Number of bytes to read; if <0, all remaining bytes.
995
+ """
996
+ length = -1 if length is None else int(length)
997
+ if self.mode != "rb":
998
+ raise ValueError("File not in read mode")
999
+ if length < 0:
1000
+ length = self.size - self.loc
1001
+ if self.closed:
1002
+ raise ValueError("I/O operation on closed file.")
1003
+ if length == 0:
1004
+ # don't even bother calling fetch
1005
+ return b""
1006
+ out = await self._fetch_range(self.loc, self.loc + length)
1007
+ self.loc += len(out)
1008
+ return out
1009
+
1010
+ async def write(self, data):
1011
+ """
1012
+ Write data to buffer.
1013
+
1014
+ Buffer only sent on flush() or if buffer is greater than
1015
+ or equal to blocksize.
1016
+
1017
+ Parameters
1018
+ ----------
1019
+ data: bytes
1020
+ Set of bytes to be written.
1021
+ """
1022
+ if self.mode not in {"wb", "ab"}:
1023
+ raise ValueError("File not in write mode")
1024
+ if self.closed:
1025
+ raise ValueError("I/O operation on closed file.")
1026
+ if self.forced:
1027
+ raise ValueError("This file has been force-flushed, can only close")
1028
+ out = self.buffer.write(data)
1029
+ self.loc += out
1030
+ if self.buffer.tell() >= self.blocksize:
1031
+ await self.flush()
1032
+ return out
1033
+
1034
+ async def close(self):
1035
+ """Close file
1036
+
1037
+ Finalizes writes, discards cache
1038
+ """
1039
+ if getattr(self, "_unclosable", False):
1040
+ return
1041
+ if self.closed:
1042
+ return
1043
+ if self.mode == "rb":
1044
+ self.cache = None
1045
+ else:
1046
+ if not self.forced:
1047
+ await self.flush(force=True)
1048
+
1049
+ if self.fs is not None:
1050
+ self.fs.invalidate_cache(self.path)
1051
+ self.fs.invalidate_cache(self.fs._parent(self.path))
1052
+
1053
+ self.closed = True
1054
+
1055
+ async def flush(self, force=False):
1056
+ if self.closed:
1057
+ raise ValueError("Flush on closed file")
1058
+ if force and self.forced:
1059
+ raise ValueError("Force flush cannot be called more than once")
1060
+ if force:
1061
+ self.forced = True
1062
+
1063
+ if self.mode not in {"wb", "ab"}:
1064
+ # no-op to flush on read-mode
1065
+ return
1066
+
1067
+ if not force and self.buffer.tell() < self.blocksize:
1068
+ # Defer write on small block
1069
+ return
1070
+
1071
+ if self.offset is None:
1072
+ # Initialize a multipart upload
1073
+ self.offset = 0
1074
+ try:
1075
+ await self._initiate_upload()
1076
+ except:
1077
+ self.closed = True
1078
+ raise
1079
+
1080
+ if await self._upload_chunk(final=force) is not False:
1081
+ self.offset += self.buffer.seek(0, 2)
1082
+ self.buffer = io.BytesIO()
1083
+
1084
+ async def __aenter__(self):
1085
+ return self
1086
+
1087
+ async def __aexit__(self, exc_type, exc_val, exc_tb):
1088
+ await self.close()
1089
+
1090
+ async def _fetch_range(self, start, end):
1091
+ raise NotImplementedError
1092
+
1093
+ async def _initiate_upload(self):
1094
+ pass
1095
+
1096
+ async def _upload_chunk(self, final=False):
1097
+ raise NotImplementedError
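
A minimal sketch of how the sync()/get_loop() helpers defined in this module drive a coroutine from blocking code; the coroutine here is a stand-in rather than a real AsyncFileSystem call:

import asyncio

import fsspec.asyn


async def _probe(delay):
    # placeholder coroutine; an implementation would await e.g. _cat_file() here
    await asyncio.sleep(delay)
    return "done"


loop = fsspec.asyn.get_loop()  # background event loop on the dedicated "fsspecIO" thread
print(fsspec.asyn.sync(loop, _probe, 0.01, timeout=5))  # -> "done"
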
venv/lib/python3.13/site-packages/fsspec/caching.py ADDED
@@ -0,0 +1,1004 @@
1
+ from __future__ import annotations
2
+
3
+ import collections
4
+ import functools
5
+ import logging
6
+ import math
7
+ import os
8
+ import threading
9
+ import warnings
10
+ from collections import OrderedDict
11
+ from concurrent.futures import Future, ThreadPoolExecutor
12
+ from itertools import groupby
13
+ from operator import itemgetter
14
+ from typing import (
15
+ TYPE_CHECKING,
16
+ Any,
17
+ Callable,
18
+ ClassVar,
19
+ Generic,
20
+ NamedTuple,
21
+ TypeVar,
22
+ )
23
+
24
+ if TYPE_CHECKING:
25
+ import mmap
26
+
27
+ from typing_extensions import ParamSpec
28
+
29
+ P = ParamSpec("P")
30
+ else:
31
+ P = TypeVar("P")
32
+
33
+ T = TypeVar("T")
34
+
35
+
36
+ logger = logging.getLogger("fsspec")
37
+
38
+ Fetcher = Callable[[int, int], bytes] # Maps (start, end) to bytes
39
+ MultiFetcher = Callable[[list[int, int]], bytes] # Maps [(start, end)] to bytes
40
+
41
+
42
+ class BaseCache:
43
+ """Pass-through cache: doesn't keep anything, calls every time
44
+
45
+ Acts as base class for other cachers
46
+
47
+ Parameters
48
+ ----------
49
+ blocksize: int
50
+ How far to read ahead in numbers of bytes
51
+ fetcher: func
52
+ Function of the form f(start, end) which gets bytes from remote as
53
+ specified
54
+ size: int
55
+ How big this file is
56
+ """
57
+
58
+ name: ClassVar[str] = "none"
59
+
60
+ def __init__(self, blocksize: int, fetcher: Fetcher, size: int) -> None:
61
+ self.blocksize = blocksize
62
+ self.nblocks = 0
63
+ self.fetcher = fetcher
64
+ self.size = size
65
+ self.hit_count = 0
66
+ self.miss_count = 0
67
+ # the bytes that we actually requested
68
+ self.total_requested_bytes = 0
69
+
70
+ def _fetch(self, start: int | None, stop: int | None) -> bytes:
71
+ if start is None:
72
+ start = 0
73
+ if stop is None:
74
+ stop = self.size
75
+ if start >= self.size or start >= stop:
76
+ return b""
77
+ return self.fetcher(start, stop)
78
+
79
+ def _reset_stats(self) -> None:
80
+ """Reset hit and miss counts for a more granular report, e.g. by file."""
81
+ self.hit_count = 0
82
+ self.miss_count = 0
83
+ self.total_requested_bytes = 0
84
+
85
+ def _log_stats(self) -> str:
86
+ """Return a formatted string of the cache statistics."""
87
+ if self.hit_count == 0 and self.miss_count == 0:
88
+ # a cache that does nothing, this is for logs only
89
+ return ""
90
+ return f" , {self.name}: {self.hit_count} hits, {self.miss_count} misses, {self.total_requested_bytes} total requested bytes"
91
+
92
+ def __repr__(self) -> str:
93
+ # TODO: use rich for better formatting
94
+ return f"""
95
+ <{self.__class__.__name__}:
96
+ block size : {self.blocksize}
97
+ block count : {self.nblocks}
98
+ file size : {self.size}
99
+ cache hits : {self.hit_count}
100
+ cache misses: {self.miss_count}
101
+ total requested bytes: {self.total_requested_bytes}>
102
+ """
103
+
104
+
105
+ class MMapCache(BaseCache):
106
+ """memory-mapped sparse file cache
107
+
108
+ Opens temporary file, which is filled blocks-wise when data is requested.
109
+ Ensure there is enough disc space in the temporary location.
110
+
111
+ This cache method might only work on posix
112
+
113
+ Parameters
114
+ ----------
115
+ blocksize: int
116
+ How far to read ahead in numbers of bytes
117
+ fetcher: Fetcher
118
+ Function of the form f(start, end) which gets bytes from remote as
119
+ specified
120
+ size: int
121
+ How big this file is
122
+ location: str
123
+ Where to create the temporary file. If None, a temporary file is
124
+ created using tempfile.TemporaryFile().
125
+ blocks: set[int]
126
+ Set of block numbers that have already been fetched. If None, an empty
127
+ set is created.
128
+ multi_fetcher: MultiFetcher
129
+ Function of the form f([(start, end)]) which gets bytes from remote
130
+ as specified. This function is used to fetch multiple blocks at once.
131
+ If not specified, the fetcher function is used instead.
132
+ """
133
+
134
+ name = "mmap"
135
+
136
+ def __init__(
137
+ self,
138
+ blocksize: int,
139
+ fetcher: Fetcher,
140
+ size: int,
141
+ location: str | None = None,
142
+ blocks: set[int] | None = None,
143
+ multi_fetcher: MultiFetcher | None = None,
144
+ ) -> None:
145
+ super().__init__(blocksize, fetcher, size)
146
+ self.blocks = set() if blocks is None else blocks
147
+ self.location = location
148
+ self.multi_fetcher = multi_fetcher
149
+ self.cache = self._makefile()
150
+
151
+ def _makefile(self) -> mmap.mmap | bytearray:
152
+ import mmap
153
+ import tempfile
154
+
155
+ if self.size == 0:
156
+ return bytearray()
157
+
158
+ # posix version
159
+ if self.location is None or not os.path.exists(self.location):
160
+ if self.location is None:
161
+ fd = tempfile.TemporaryFile()
162
+ self.blocks = set()
163
+ else:
164
+ fd = open(self.location, "wb+")
165
+ fd.seek(self.size - 1)
166
+ fd.write(b"1")
167
+ fd.flush()
168
+ else:
169
+ fd = open(self.location, "r+b")
170
+
171
+ return mmap.mmap(fd.fileno(), self.size)
172
+
173
+ def _fetch(self, start: int | None, end: int | None) -> bytes:
174
+ logger.debug(f"MMap cache fetching {start}-{end}")
175
+ if start is None:
176
+ start = 0
177
+ if end is None:
178
+ end = self.size
179
+ if start >= self.size or start >= end:
180
+ return b""
181
+ start_block = start // self.blocksize
182
+ end_block = end // self.blocksize
183
+ block_range = range(start_block, end_block + 1)
184
+ # Determine which blocks need to be fetched. This sequence is sorted by construction.
185
+ need = (i for i in block_range if i not in self.blocks)
186
+ # Count the number of blocks already cached
187
+ self.hit_count += sum(1 for i in block_range if i in self.blocks)
188
+
189
+ ranges = []
190
+
191
+ # Consolidate needed blocks.
192
+ # Algorithm adapted from Python 2.x itertools documentation.
193
+ # We are grouping an enumerated sequence of blocks. By comparing when the difference
194
+ # between an ascending range (provided by enumerate) and the needed block numbers
195
+ # we can detect when the block number skips values. The key computes this difference.
196
+ # Whenever the difference changes, we know that we have previously cached block(s),
197
+ # and a new group is started. In other words, this algorithm neatly groups
198
+ # runs of consecutive block numbers so they can be fetched together.
199
+ for _, _blocks in groupby(enumerate(need), key=lambda x: x[0] - x[1]):
200
+ # Extract the blocks from the enumerated sequence
201
+ _blocks = tuple(map(itemgetter(1), _blocks))
202
+ # Compute start of first block
203
+ sstart = _blocks[0] * self.blocksize
204
+ # Compute the end of the last block. Last block may not be full size.
205
+ send = min(_blocks[-1] * self.blocksize + self.blocksize, self.size)
206
+
207
+ # Fetch bytes (could be multiple consecutive blocks)
208
+ self.total_requested_bytes += send - sstart
209
+ logger.debug(
210
+ f"MMap get blocks {_blocks[0]}-{_blocks[-1]} ({sstart}-{send})"
211
+ )
212
+ ranges.append((sstart, send))
213
+
214
+ # Update set of cached blocks
215
+ self.blocks.update(_blocks)
216
+ # Update cache statistics with number of blocks we had to cache
217
+ self.miss_count += len(_blocks)
218
+
219
+ if not ranges:
220
+ return self.cache[start:end]
221
+
222
+ if self.multi_fetcher:
223
+ logger.debug(f"MMap get blocks {ranges}")
224
+ for idx, r in enumerate(self.multi_fetcher(ranges)):
225
+ (sstart, send) = ranges[idx]
226
+ logger.debug(f"MMap copy block ({sstart}-{send})")
227
+ self.cache[sstart:send] = r
228
+ else:
229
+ for sstart, send in ranges:
230
+ logger.debug(f"MMap get block ({sstart}-{send})")
231
+ self.cache[sstart:send] = self.fetcher(sstart, send)
232
+
233
+ return self.cache[start:end]
234
+
235
+ def __getstate__(self) -> dict[str, Any]:
236
+ state = self.__dict__.copy()
237
+ # Remove the unpicklable entries.
238
+ del state["cache"]
239
+ return state
240
+
241
+ def __setstate__(self, state: dict[str, Any]) -> None:
242
+ # Restore instance attributes
243
+ self.__dict__.update(state)
244
+ self.cache = self._makefile()
245
+
246
+
247
+ class ReadAheadCache(BaseCache):
248
+ """Cache which reads only when we get beyond a block of data
249
+
250
+ This is a much simpler version of BytesCache, and does not attempt to
251
+ fill holes in the cache or keep fragments alive. It is best suited to
252
+ many small reads in a sequential order (e.g., reading lines from a file).
253
+ """
254
+
255
+ name = "readahead"
256
+
257
+ def __init__(self, blocksize: int, fetcher: Fetcher, size: int) -> None:
258
+ super().__init__(blocksize, fetcher, size)
259
+ self.cache = b""
260
+ self.start = 0
261
+ self.end = 0
262
+
263
+ def _fetch(self, start: int | None, end: int | None) -> bytes:
264
+ if start is None:
265
+ start = 0
266
+ if end is None or end > self.size:
267
+ end = self.size
268
+ if start >= self.size or start >= end:
269
+ return b""
270
+ l = end - start
271
+ if start >= self.start and end <= self.end:
272
+ # cache hit
273
+ self.hit_count += 1
274
+ return self.cache[start - self.start : end - self.start]
275
+ elif self.start <= start < self.end:
276
+ # partial hit
277
+ self.miss_count += 1
278
+ part = self.cache[start - self.start :]
279
+ l -= len(part)
280
+ start = self.end
281
+ else:
282
+ # miss
283
+ self.miss_count += 1
284
+ part = b""
285
+ end = min(self.size, end + self.blocksize)
286
+ self.total_requested_bytes += end - start
287
+ self.cache = self.fetcher(start, end) # new block replaces old
288
+ self.start = start
289
+ self.end = self.start + len(self.cache)
290
+ return part + self.cache[:l]
291
+
292
+
293
+ class FirstChunkCache(BaseCache):
294
+ """Caches the first block of a file only
295
+
296
+ This may be useful for file types where the metadata is stored in the header,
297
+ but is randomly accessed.
298
+ """
299
+
300
+ name = "first"
301
+
302
+ def __init__(self, blocksize: int, fetcher: Fetcher, size: int) -> None:
303
+ if blocksize > size:
304
+ # this will buffer the whole thing
305
+ blocksize = size
306
+ super().__init__(blocksize, fetcher, size)
307
+ self.cache: bytes | None = None
308
+
309
+ def _fetch(self, start: int | None, end: int | None) -> bytes:
310
+ start = start or 0
311
+ if start > self.size:
312
+ logger.debug("FirstChunkCache: requested start > file size")
313
+ return b""
314
+
315
+ end = min(end, self.size)
316
+
317
+ if start < self.blocksize:
318
+ if self.cache is None:
319
+ self.miss_count += 1
320
+ if end > self.blocksize:
321
+ self.total_requested_bytes += end
322
+ data = self.fetcher(0, end)
323
+ self.cache = data[: self.blocksize]
324
+ return data[start:]
325
+ self.cache = self.fetcher(0, self.blocksize)
326
+ self.total_requested_bytes += self.blocksize
327
+ part = self.cache[start:end]
328
+ if end > self.blocksize:
329
+ self.total_requested_bytes += end - self.blocksize
330
+ part += self.fetcher(self.blocksize, end)
331
+ self.hit_count += 1
332
+ return part
333
+ else:
334
+ self.miss_count += 1
335
+ self.total_requested_bytes += end - start
336
+ return self.fetcher(start, end)
337
+
338
+
339
+ class BlockCache(BaseCache):
340
+ """
341
+ Cache holding memory as a set of blocks.
342
+
343
+ Requests are only ever made ``blocksize`` at a time, and are
344
+ stored in an LRU cache. The least recently accessed block is
345
+ discarded when more than ``maxblocks`` are stored.
346
+
347
+ Parameters
348
+ ----------
349
+ blocksize : int
350
+ The number of bytes to store in each block.
351
+ Requests are only ever made for ``blocksize``, so this
352
+ should balance the overhead of making a request against
353
+ the granularity of the blocks.
354
+ fetcher : Callable
355
+ size : int
356
+ The total size of the file being cached.
357
+ maxblocks : int
358
+ The maximum number of blocks to cache for. The maximum memory
359
+ use for this cache is then ``blocksize * maxblocks``.
360
+ """
361
+
362
+ name = "blockcache"
363
+
364
+ def __init__(
365
+ self, blocksize: int, fetcher: Fetcher, size: int, maxblocks: int = 32
366
+ ) -> None:
367
+ super().__init__(blocksize, fetcher, size)
368
+ self.nblocks = math.ceil(size / blocksize)
369
+ self.maxblocks = maxblocks
370
+ self._fetch_block_cached = functools.lru_cache(maxblocks)(self._fetch_block)
371
+
372
+ def cache_info(self):
373
+ """
374
+ The statistics on the block cache.
375
+
376
+ Returns
377
+ -------
378
+ NamedTuple
379
+ Returned directly from the LRU Cache used internally.
380
+ """
381
+ return self._fetch_block_cached.cache_info()
382
+
383
+ def __getstate__(self) -> dict[str, Any]:
384
+ state = self.__dict__
385
+ del state["_fetch_block_cached"]
386
+ return state
387
+
388
+ def __setstate__(self, state: dict[str, Any]) -> None:
389
+ self.__dict__.update(state)
390
+ self._fetch_block_cached = functools.lru_cache(state["maxblocks"])(
391
+ self._fetch_block
392
+ )
393
+
394
+ def _fetch(self, start: int | None, end: int | None) -> bytes:
395
+ if start is None:
396
+ start = 0
397
+ if end is None:
398
+ end = self.size
399
+ if start >= self.size or start >= end:
400
+ return b""
401
+
402
+ # byte position -> block numbers
403
+ start_block_number = start // self.blocksize
404
+ end_block_number = end // self.blocksize
405
+
406
+ # these are cached, so safe to do multiple calls for the same start and end.
407
+ for block_number in range(start_block_number, end_block_number + 1):
408
+ self._fetch_block_cached(block_number)
409
+
410
+ return self._read_cache(
411
+ start,
412
+ end,
413
+ start_block_number=start_block_number,
414
+ end_block_number=end_block_number,
415
+ )
416
+
417
+ def _fetch_block(self, block_number: int) -> bytes:
418
+ """
419
+ Fetch the block of data for `block_number`.
420
+ """
421
+ if block_number > self.nblocks:
422
+ raise ValueError(
423
+ f"'block_number={block_number}' is greater than "
424
+ f"the number of blocks ({self.nblocks})"
425
+ )
426
+
427
+ start = block_number * self.blocksize
428
+ end = start + self.blocksize
429
+ self.total_requested_bytes += end - start
430
+ self.miss_count += 1
431
+ logger.info("BlockCache fetching block %d", block_number)
432
+ block_contents = super()._fetch(start, end)
433
+ return block_contents
434
+
435
+ def _read_cache(
436
+ self, start: int, end: int, start_block_number: int, end_block_number: int
437
+ ) -> bytes:
438
+ """
439
+ Read from our block cache.
440
+
441
+ Parameters
442
+ ----------
443
+ start, end : int
444
+ The start and end byte positions.
445
+ start_block_number, end_block_number : int
446
+ The start and end block numbers.
447
+ """
448
+ start_pos = start % self.blocksize
449
+ end_pos = end % self.blocksize
450
+
451
+ self.hit_count += 1
452
+ if start_block_number == end_block_number:
453
+ block: bytes = self._fetch_block_cached(start_block_number)
454
+ return block[start_pos:end_pos]
455
+
456
+ else:
457
+ # read from the initial
458
+ out = [self._fetch_block_cached(start_block_number)[start_pos:]]
459
+
460
+ # intermediate blocks
461
+ # Note: it'd be nice to combine these into one big request. However
462
+ # that doesn't play nicely with our LRU cache.
463
+ out.extend(
464
+ map(
465
+ self._fetch_block_cached,
466
+ range(start_block_number + 1, end_block_number),
467
+ )
468
+ )
469
+
470
+ # final block
471
+ out.append(self._fetch_block_cached(end_block_number)[:end_pos])
472
+
473
+ return b"".join(out)
474
+
475
+
476
+ class BytesCache(BaseCache):
477
+ """Cache which holds data in an in-memory bytes object
478
+
479
+ Implements read-ahead by the block size, for semi-random reads progressing
480
+ through the file.
481
+
482
+ Parameters
483
+ ----------
484
+ trim: bool
485
+ As we read more data, whether to discard the start of the buffer when
486
+ we are more than a blocksize ahead of it.
487
+ """
488
+
489
+ name: ClassVar[str] = "bytes"
490
+
491
+ def __init__(
492
+ self, blocksize: int, fetcher: Fetcher, size: int, trim: bool = True
493
+ ) -> None:
494
+ super().__init__(blocksize, fetcher, size)
495
+ self.cache = b""
496
+ self.start: int | None = None
497
+ self.end: int | None = None
498
+ self.trim = trim
499
+
500
+ def _fetch(self, start: int | None, end: int | None) -> bytes:
501
+ # TODO: only set start/end after fetch, in case it fails?
502
+ # is this where retry logic might go?
503
+ if start is None:
504
+ start = 0
505
+ if end is None:
506
+ end = self.size
507
+ if start >= self.size or start >= end:
508
+ return b""
509
+ if (
510
+ self.start is not None
511
+ and start >= self.start
512
+ and self.end is not None
513
+ and end < self.end
514
+ ):
515
+ # cache hit: we have all the required data
516
+ offset = start - self.start
517
+ self.hit_count += 1
518
+ return self.cache[offset : offset + end - start]
519
+
520
+ if self.blocksize:
521
+ bend = min(self.size, end + self.blocksize)
522
+ else:
523
+ bend = end
524
+
525
+ if bend == start or start > self.size:
526
+ return b""
527
+
528
+ if (self.start is None or start < self.start) and (
529
+ self.end is None or end > self.end
530
+ ):
531
+ # First read, or extending both before and after
532
+ self.total_requested_bytes += bend - start
533
+ self.miss_count += 1
534
+ self.cache = self.fetcher(start, bend)
535
+ self.start = start
536
+ else:
537
+ assert self.start is not None
538
+ assert self.end is not None
539
+ self.miss_count += 1
540
+
541
+ if start < self.start:
542
+ if self.end is None or self.end - end > self.blocksize:
543
+ self.total_requested_bytes += bend - start
544
+ self.cache = self.fetcher(start, bend)
545
+ self.start = start
546
+ else:
547
+ self.total_requested_bytes += self.start - start
548
+ new = self.fetcher(start, self.start)
549
+ self.start = start
550
+ self.cache = new + self.cache
551
+ elif self.end is not None and bend > self.end:
552
+ if self.end > self.size:
553
+ pass
554
+ elif end - self.end > self.blocksize:
555
+ self.total_requested_bytes += bend - start
556
+ self.cache = self.fetcher(start, bend)
557
+ self.start = start
558
+ else:
559
+ self.total_requested_bytes += bend - self.end
560
+ new = self.fetcher(self.end, bend)
561
+ self.cache = self.cache + new
562
+
563
+ self.end = self.start + len(self.cache)
564
+ offset = start - self.start
565
+ out = self.cache[offset : offset + end - start]
566
+ if self.trim:
567
+ num = (self.end - self.start) // (self.blocksize + 1)
568
+ if num > 1:
569
+ self.start += self.blocksize * num
570
+ self.cache = self.cache[self.blocksize * num :]
571
+ return out
572
+
573
+ def __len__(self) -> int:
574
+ return len(self.cache)
575
+
576
+
577
+ class AllBytes(BaseCache):
578
+ """Cache entire contents of the file"""
579
+
580
+ name: ClassVar[str] = "all"
581
+
582
+ def __init__(
583
+ self,
584
+ blocksize: int | None = None,
585
+ fetcher: Fetcher | None = None,
586
+ size: int | None = None,
587
+ data: bytes | None = None,
588
+ ) -> None:
589
+ super().__init__(blocksize, fetcher, size) # type: ignore[arg-type]
590
+ if data is None:
591
+ self.miss_count += 1
592
+ self.total_requested_bytes += self.size
593
+ data = self.fetcher(0, self.size)
594
+ self.data = data
595
+
596
+ def _fetch(self, start: int | None, stop: int | None) -> bytes:
597
+ self.hit_count += 1
598
+ return self.data[start:stop]
599
+
600
+
601
+ class KnownPartsOfAFile(BaseCache):
602
+ """
603
+ Cache holding known file parts.
604
+
605
+ Parameters
606
+ ----------
607
+ blocksize: int
608
+ How far to read ahead in numbers of bytes
609
+ fetcher: func
610
+ Function of the form f(start, end) which gets bytes from remote as
611
+ specified
612
+ size: int
613
+ How big this file is
614
+ data: dict
615
+ A dictionary mapping explicit `(start, stop)` file-offset tuples
616
+ with known bytes.
617
+ strict: bool, default True
618
+ Whether to fetch reads that go beyond a known byte-range boundary.
619
+ If `False`, any read that ends outside a known part will be zero
620
+ padded. Note that zero padding will not be used for reads that
621
+ begin outside a known byte-range.
622
+ """
623
+
624
+ name: ClassVar[str] = "parts"
625
+
626
+ def __init__(
627
+ self,
628
+ blocksize: int,
629
+ fetcher: Fetcher,
630
+ size: int,
631
+ data: dict[tuple[int, int], bytes] | None = None,
632
+ strict: bool = True,
633
+ **_: Any,
634
+ ):
635
+ super().__init__(blocksize, fetcher, size)
636
+ self.strict = strict
637
+
638
+ # simple consolidation of contiguous blocks
639
+ if data:
640
+ old_offsets = sorted(data.keys())
641
+ offsets = [old_offsets[0]]
642
+ blocks = [data.pop(old_offsets[0])]
643
+ for start, stop in old_offsets[1:]:
644
+ start0, stop0 = offsets[-1]
645
+ if start == stop0:
646
+ offsets[-1] = (start0, stop)
647
+ blocks[-1] += data.pop((start, stop))
648
+ else:
649
+ offsets.append((start, stop))
650
+ blocks.append(data.pop((start, stop)))
651
+
652
+ self.data = dict(zip(offsets, blocks))
653
+ else:
654
+ self.data = {}
655
+
656
+ def _fetch(self, start: int | None, stop: int | None) -> bytes:
657
+ if start is None:
658
+ start = 0
659
+ if stop is None:
660
+ stop = self.size
661
+
662
+ out = b""
663
+ for (loc0, loc1), data in self.data.items():
664
+ # If self.strict=False, use zero-padded data
665
+ # for reads beyond the end of a "known" buffer
666
+ if loc0 <= start < loc1:
667
+ off = start - loc0
668
+ out = data[off : off + stop - start]
669
+ if not self.strict or loc0 <= stop <= loc1:
670
+ # The request is within a known range, or
671
+ # it begins within a known range, and we
672
+ # are allowed to pad reads beyond the
673
+ # buffer with zero
674
+ out += b"\x00" * (stop - start - len(out))
675
+ self.hit_count += 1
676
+ return out
677
+ else:
678
+ # The request ends outside a known range,
679
+ # and we are being "strict" about reads
680
+ # beyond the buffer
681
+ start = loc1
682
+ break
683
+
684
+ # We only get here if there is a request outside the
685
+ # known parts of the file. In an ideal world, this
686
+ # should never happen
687
+ if self.fetcher is None:
688
+ # We cannot fetch the data, so raise an error
689
+ raise ValueError(f"Read is outside the known file parts: {(start, stop)}. ")
690
+ # We can fetch the data, but should warn the user
691
+ # that this may be slow
692
+ warnings.warn(
693
+ f"Read is outside the known file parts: {(start, stop)}. "
694
+ f"IO/caching performance may be poor!"
695
+ )
696
+ logger.debug(f"KnownPartsOfAFile cache fetching {start}-{stop}")
697
+ self.total_requested_bytes += stop - start
698
+ self.miss_count += 1
699
+ return out + super()._fetch(start, stop)
700
+
701
+
702
+ class UpdatableLRU(Generic[P, T]):
703
+ """
704
+ Custom implementation of LRU cache that allows updating keys
705
+
706
+ Used by BackgroundBlockCache
707
+ """
708
+
709
+ class CacheInfo(NamedTuple):
710
+ hits: int
711
+ misses: int
712
+ maxsize: int
713
+ currsize: int
714
+
715
+ def __init__(self, func: Callable[P, T], max_size: int = 128) -> None:
716
+ self._cache: OrderedDict[Any, T] = collections.OrderedDict()
717
+ self._func = func
718
+ self._max_size = max_size
719
+ self._hits = 0
720
+ self._misses = 0
721
+ self._lock = threading.Lock()
722
+
723
+ def __call__(self, *args: P.args, **kwargs: P.kwargs) -> T:
724
+ if kwargs:
725
+ raise TypeError(f"Got unexpected keyword argument {kwargs.keys()}")
726
+ with self._lock:
727
+ if args in self._cache:
728
+ self._cache.move_to_end(args)
729
+ self._hits += 1
730
+ return self._cache[args]
731
+
732
+ result = self._func(*args, **kwargs)
733
+
734
+ with self._lock:
735
+ self._cache[args] = result
736
+ self._misses += 1
737
+ if len(self._cache) > self._max_size:
738
+ self._cache.popitem(last=False)
739
+
740
+ return result
741
+
742
+ def is_key_cached(self, *args: Any) -> bool:
743
+ with self._lock:
744
+ return args in self._cache
745
+
746
+ def add_key(self, result: T, *args: Any) -> None:
747
+ with self._lock:
748
+ self._cache[args] = result
749
+ if len(self._cache) > self._max_size:
750
+ self._cache.popitem(last=False)
751
+
752
+ def cache_info(self) -> UpdatableLRU.CacheInfo:
753
+ with self._lock:
754
+ return self.CacheInfo(
755
+ maxsize=self._max_size,
756
+ currsize=len(self._cache),
757
+ hits=self._hits,
758
+ misses=self._misses,
759
+ )
760
+
761
+
762
+ class BackgroundBlockCache(BaseCache):
763
+ """
764
+ Cache holding memory as a set of blocks with pre-loading of
765
+ the next block in the background.
766
+
767
+ Requests are only ever made ``blocksize`` at a time, and are
768
+ stored in an LRU cache. The least recently accessed block is
769
+ discarded when more than ``maxblocks`` are stored. If the
770
+ next block is not in cache, it is loaded in a separate thread
771
+ in a non-blocking way.
772
+
773
+ Parameters
774
+ ----------
775
+ blocksize : int
776
+ The number of bytes to store in each block.
777
+ Requests are only ever made for ``blocksize``, so this
778
+ should balance the overhead of making a request against
779
+ the granularity of the blocks.
780
+ fetcher : Callable
781
+ size : int
782
+ The total size of the file being cached.
783
+ maxblocks : int
784
+ The maximum number of blocks to cache for. The maximum memory
785
+ use for this cache is then ``blocksize * maxblocks``.
786
+ """
787
+
788
+ name: ClassVar[str] = "background"
789
+
790
+ def __init__(
791
+ self, blocksize: int, fetcher: Fetcher, size: int, maxblocks: int = 32
792
+ ) -> None:
793
+ super().__init__(blocksize, fetcher, size)
794
+ self.nblocks = math.ceil(size / blocksize)
795
+ self.maxblocks = maxblocks
796
+ self._fetch_block_cached = UpdatableLRU(self._fetch_block, maxblocks)
797
+
798
+ self._thread_executor = ThreadPoolExecutor(max_workers=1)
799
+ self._fetch_future_block_number: int | None = None
800
+ self._fetch_future: Future[bytes] | None = None
801
+ self._fetch_future_lock = threading.Lock()
802
+
803
+ def cache_info(self) -> UpdatableLRU.CacheInfo:
804
+ """
805
+ The statistics on the block cache.
806
+
807
+ Returns
808
+ -------
809
+ NamedTuple
810
+ Returned directly from the LRU Cache used internally.
811
+ """
812
+ return self._fetch_block_cached.cache_info()
813
+
814
+ def __getstate__(self) -> dict[str, Any]:
815
+ state = self.__dict__
816
+ del state["_fetch_block_cached"]
817
+ del state["_thread_executor"]
818
+ del state["_fetch_future_block_number"]
819
+ del state["_fetch_future"]
820
+ del state["_fetch_future_lock"]
821
+ return state
822
+
823
+ def __setstate__(self, state) -> None:
824
+ self.__dict__.update(state)
825
+ self._fetch_block_cached = UpdatableLRU(self._fetch_block, state["maxblocks"])
826
+ self._thread_executor = ThreadPoolExecutor(max_workers=1)
827
+ self._fetch_future_block_number = None
828
+ self._fetch_future = None
829
+ self._fetch_future_lock = threading.Lock()
830
+
831
+ def _fetch(self, start: int | None, end: int | None) -> bytes:
832
+ if start is None:
833
+ start = 0
834
+ if end is None:
835
+ end = self.size
836
+ if start >= self.size or start >= end:
837
+ return b""
838
+
839
+ # byte position -> block numbers
840
+ start_block_number = start // self.blocksize
841
+ end_block_number = end // self.blocksize
842
+
843
+ fetch_future_block_number = None
844
+ fetch_future = None
845
+ with self._fetch_future_lock:
846
+ # Background thread is running. Check whether we can or must join it.
847
+ if self._fetch_future is not None:
848
+ assert self._fetch_future_block_number is not None
849
+ if self._fetch_future.done():
850
+ logger.info("BlockCache joined background fetch without waiting.")
851
+ self._fetch_block_cached.add_key(
852
+ self._fetch_future.result(), self._fetch_future_block_number
853
+ )
854
+ # Cleanup the fetch variables. Done with fetching the block.
855
+ self._fetch_future_block_number = None
856
+ self._fetch_future = None
857
+ else:
858
+ # Must join if we need the block for the current fetch
859
+ must_join = bool(
860
+ start_block_number
861
+ <= self._fetch_future_block_number
862
+ <= end_block_number
863
+ )
864
+ if must_join:
865
+ # Copy to the local variables to release lock
866
+ # before waiting for result
867
+ fetch_future_block_number = self._fetch_future_block_number
868
+ fetch_future = self._fetch_future
869
+
870
+ # Cleanup the fetch variables. Have a local copy.
871
+ self._fetch_future_block_number = None
872
+ self._fetch_future = None
873
+
874
+ # Need to wait for the future for the current read
875
+ if fetch_future is not None:
876
+ logger.info("BlockCache waiting for background fetch.")
877
+ # Wait until result and put it in cache
878
+ self._fetch_block_cached.add_key(
879
+ fetch_future.result(), fetch_future_block_number
880
+ )
881
+
882
+ # these are cached, so safe to do multiple calls for the same start and end.
883
+ for block_number in range(start_block_number, end_block_number + 1):
884
+ self._fetch_block_cached(block_number)
885
+
886
+ # fetch next block in the background if nothing is running in the background,
887
+ # the block is within file and it is not already cached
888
+ end_block_plus_1 = end_block_number + 1
889
+ with self._fetch_future_lock:
890
+ if (
891
+ self._fetch_future is None
892
+ and end_block_plus_1 <= self.nblocks
893
+ and not self._fetch_block_cached.is_key_cached(end_block_plus_1)
894
+ ):
895
+ self._fetch_future_block_number = end_block_plus_1
896
+ self._fetch_future = self._thread_executor.submit(
897
+ self._fetch_block, end_block_plus_1, "async"
898
+ )
899
+
900
+ return self._read_cache(
901
+ start,
902
+ end,
903
+ start_block_number=start_block_number,
904
+ end_block_number=end_block_number,
905
+ )
906
+
907
+ def _fetch_block(self, block_number: int, log_info: str = "sync") -> bytes:
908
+ """
909
+ Fetch the block of data for `block_number`.
910
+ """
911
+ if block_number > self.nblocks:
912
+ raise ValueError(
913
+ f"'block_number={block_number}' is greater than "
914
+ f"the number of blocks ({self.nblocks})"
915
+ )
916
+
917
+ start = block_number * self.blocksize
918
+ end = start + self.blocksize
919
+ logger.info("BlockCache fetching block (%s) %d", log_info, block_number)
920
+ self.total_requested_bytes += end - start
921
+ self.miss_count += 1
922
+ block_contents = super()._fetch(start, end)
923
+ return block_contents
924
+
925
+ def _read_cache(
926
+ self, start: int, end: int, start_block_number: int, end_block_number: int
927
+ ) -> bytes:
928
+ """
929
+ Read from our block cache.
930
+
931
+ Parameters
932
+ ----------
933
+ start, end : int
934
+ The start and end byte positions.
935
+ start_block_number, end_block_number : int
936
+ The start and end block numbers.
937
+ """
938
+ start_pos = start % self.blocksize
939
+ end_pos = end % self.blocksize
940
+
941
+ # kind of pointless to count this as a hit, but it is
942
+ self.hit_count += 1
943
+
944
+ if start_block_number == end_block_number:
945
+ block = self._fetch_block_cached(start_block_number)
946
+ return block[start_pos:end_pos]
947
+
948
+ else:
949
+ # read from the initial
950
+ out = [self._fetch_block_cached(start_block_number)[start_pos:]]
951
+
952
+ # intermediate blocks
953
+ # Note: it'd be nice to combine these into one big request. However
954
+ # that doesn't play nicely with our LRU cache.
955
+ out.extend(
956
+ map(
957
+ self._fetch_block_cached,
958
+ range(start_block_number + 1, end_block_number),
959
+ )
960
+ )
961
+
962
+ # final block
963
+ out.append(self._fetch_block_cached(end_block_number)[:end_pos])
964
+
965
+ return b"".join(out)
966
+
967
+
968
+ caches: dict[str | None, type[BaseCache]] = {
969
+ # one custom case
970
+ None: BaseCache,
971
+ }
972
+
973
+
974
+ def register_cache(cls: type[BaseCache], clobber: bool = False) -> None:
975
+ """'Register' cache implementation.
976
+
977
+ Parameters
978
+ ----------
979
+ clobber: bool, optional
980
+ If set to True (default is False), allow overwriting an existing
981
+ entry.
982
+
983
+ Raises
984
+ ------
985
+ ValueError
986
+ """
987
+ name = cls.name
988
+ if not clobber and name in caches:
989
+ raise ValueError(f"Cache with name {name!r} is already known: {caches[name]}")
990
+ caches[name] = cls
991
+
992
+
993
+ for c in (
994
+ BaseCache,
995
+ MMapCache,
996
+ BytesCache,
997
+ ReadAheadCache,
998
+ BlockCache,
999
+ FirstChunkCache,
1000
+ AllBytes,
1001
+ KnownPartsOfAFile,
1002
+ BackgroundBlockCache,
1003
+ ):
1004
+ register_cache(c)
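The loop above registers every built-in cache class in the ``caches`` registry under its ``name``. As a brief illustration of how third-party code could plug into the same mechanism, the following is a minimal sketch that defines and registers a trivial cache; the class, its registered name "eager-example", and the use of ``cache_type=`` when opening a buffered file are illustrative assumptions, not part of this module.

# Illustrative sketch (not part of fsspec): a cache that eagerly reads the
# whole file on first access and then serves every read from memory.
from fsspec.caching import BaseCache, register_cache

class EagerWholeFileCache(BaseCache):
    name = "eager-example"  # hypothetical name used for registration

    def __init__(self, blocksize, fetcher, size):
        super().__init__(blocksize, fetcher, size)
        self._data = None

    def _fetch(self, start, end):
        start = start or 0
        end = self.size if end is None else min(end, self.size)
        if self._data is None:
            # one request for everything, mirroring AllBytes above
            self.miss_count += 1
            self.total_requested_bytes += self.size
            self._data = self.fetcher(0, self.size)
        else:
            self.hit_count += 1
        return self._data[start:end]

register_cache(EagerWholeFileCache)
# After registration, filesystems that use AbstractBufferedFile can in
# principle select it via fs.open(path, cache_type="eager-example").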
venv/lib/python3.13/site-packages/fsspec/callbacks.py ADDED
@@ -0,0 +1,324 @@
1
+ from functools import wraps
2
+
3
+
4
+ class Callback:
5
+ """
6
+ Base class and interface for callback mechanism
7
+
8
+ This class can be used directly for monitoring file transfers by
9
+ providing ``callback=Callback(hooks=...)`` (see the ``hooks`` argument,
10
+ below), or subclassed for more specialised behaviour.
11
+
12
+ Parameters
13
+ ----------
14
+ size: int (optional)
15
+ Nominal quantity for the value that corresponds to a complete
16
+ transfer, e.g., total number of tiles or total number of
17
+ bytes
18
+ value: int (0)
19
+ Starting internal counter value
20
+ hooks: dict or None
21
+ A dict of named functions to be called on each update. The signature
22
+ of these must be ``f(size, value, **kwargs)``
23
+ """
24
+
25
+ def __init__(self, size=None, value=0, hooks=None, **kwargs):
26
+ self.size = size
27
+ self.value = value
28
+ self.hooks = hooks or {}
29
+ self.kw = kwargs
30
+
31
+ def __enter__(self):
32
+ return self
33
+
34
+ def __exit__(self, *exc_args):
35
+ self.close()
36
+
37
+ def close(self):
38
+ """Close callback."""
39
+
40
+ def branched(self, path_1, path_2, **kwargs):
41
+ """
42
+ Return callback for child transfers
43
+
44
+ If this callback is operating at a higher level, e.g., put, which may
45
+ trigger transfers that can also be monitored. The function returns a callback
46
+ that has to be passed to the child method, e.g., put_file,
47
+ as `callback=` argument.
48
+
49
+ The implementation uses `callback.branch` for compatibility.
50
+ When implementing callbacks, it is recommended to override this function instead
51
+ of `branch` and avoid calling `super().branched(...)`.
52
+
53
+ Prefer using this function over `branch`.
54
+
55
+ Parameters
56
+ ----------
57
+ path_1: str
58
+ Child's source path
59
+ path_2: str
60
+ Child's destination path
61
+ **kwargs:
62
+ Arbitrary keyword arguments
63
+
64
+ Returns
65
+ -------
66
+ callback: Callback
67
+ A callback instance to be passed to the child method
68
+ """
69
+ self.branch(path_1, path_2, kwargs)
70
+ # mutate kwargs so that we can force the caller to pass "callback=" explicitly
71
+ return kwargs.pop("callback", DEFAULT_CALLBACK)
72
+
73
+ def branch_coro(self, fn):
74
+ """
75
+ Wraps a coroutine, and pass a new child callback to it.
76
+ """
77
+
78
+ @wraps(fn)
79
+ async def func(path1, path2: str, **kwargs):
80
+ with self.branched(path1, path2, **kwargs) as child:
81
+ return await fn(path1, path2, callback=child, **kwargs)
82
+
83
+ return func
84
+
85
+ def set_size(self, size):
86
+ """
87
+ Set the internal maximum size attribute
88
+
89
+ Usually called if not initially set at instantiation. Note that this
90
+ triggers a ``call()``.
91
+
92
+ Parameters
93
+ ----------
94
+ size: int
95
+ """
96
+ self.size = size
97
+ self.call()
98
+
99
+ def absolute_update(self, value):
100
+ """
101
+ Set the internal value state
102
+
103
+ Triggers ``call()``
104
+
105
+ Parameters
106
+ ----------
107
+ value: int
108
+ """
109
+ self.value = value
110
+ self.call()
111
+
112
+ def relative_update(self, inc=1):
113
+ """
114
+ Delta increment the internal counter
115
+
116
+ Triggers ``call()``
117
+
118
+ Parameters
119
+ ----------
120
+ inc: int
121
+ """
122
+ self.value += inc
123
+ self.call()
124
+
125
+ def call(self, hook_name=None, **kwargs):
126
+ """
127
+ Execute hook(s) with current state
128
+
129
+ Each function is passed the internal size and current value
130
+
131
+ Parameters
132
+ ----------
133
+ hook_name: str or None
134
+ If given, execute on this hook
135
+ kwargs: passed on to (all) hook(s)
136
+ """
137
+ if not self.hooks:
138
+ return
139
+ kw = self.kw.copy()
140
+ kw.update(kwargs)
141
+ if hook_name:
142
+ if hook_name not in self.hooks:
143
+ return
144
+ return self.hooks[hook_name](self.size, self.value, **kw)
145
+ for hook in self.hooks.values() or []:
146
+ hook(self.size, self.value, **kw)
147
+
148
+ def wrap(self, iterable):
149
+ """
150
+ Wrap an iterable to call ``relative_update`` on each iterations
151
+
152
+ Parameters
153
+ ----------
154
+ iterable: Iterable
155
+ The iterable that is being wrapped
156
+ """
157
+ for item in iterable:
158
+ self.relative_update()
159
+ yield item
160
+
161
+ def branch(self, path_1, path_2, kwargs):
162
+ """
163
+ Set callbacks for child transfers
164
+
165
+ If this callback is operating at a higher level, e.g., put, which may
166
+ trigger transfers that can also be monitored. The passed kwargs are
167
+ to be *mutated* to add ``callback=``, if this class supports branching
168
+ to children.
169
+
170
+ Parameters
171
+ ----------
172
+ path_1: str
173
+ Child's source path
174
+ path_2: str
175
+ Child's destination path
176
+ kwargs: dict
177
+ arguments passed to child method, e.g., put_file.
178
+
179
+ Returns
180
+ -------
181
+
182
+ """
183
+ return None
184
+
185
+ def no_op(self, *_, **__):
186
+ pass
187
+
188
+ def __getattr__(self, item):
189
+ """
190
+ If undefined methods are called on this class, nothing happens
191
+ """
192
+ return self.no_op
193
+
194
+ @classmethod
195
+ def as_callback(cls, maybe_callback=None):
196
+ """Transform callback=... into Callback instance
197
+
198
+ For the special value of ``None``, return the global instance of
199
+ ``NoOpCallback``. This is an alternative to including
200
+ ``callback=DEFAULT_CALLBACK`` directly in a method signature.
201
+ """
202
+ if maybe_callback is None:
203
+ return DEFAULT_CALLBACK
204
+ return maybe_callback
205
+
206
+
207
+ class NoOpCallback(Callback):
208
+ """
209
+ This implementation of Callback does exactly nothing
210
+ """
211
+
212
+ def call(self, *args, **kwargs):
213
+ return None
214
+
215
+
216
+ class DotPrinterCallback(Callback):
217
+ """
218
+ Simple example Callback implementation
219
+
220
+ Almost identical to Callback with a hook that prints a char; here we
221
+ demonstrate how the outer layer may print "#" and the inner layer "."
222
+ """
223
+
224
+ def __init__(self, chr_to_print="#", **kwargs):
225
+ self.chr = chr_to_print
226
+ super().__init__(**kwargs)
227
+
228
+ def branch(self, path_1, path_2, kwargs):
229
+ """Mutate kwargs to add new instance with different print char"""
230
+ kwargs["callback"] = DotPrinterCallback(".")
231
+
232
+ def call(self, **kwargs):
233
+ """Just outputs a character"""
234
+ print(self.chr, end="")
235
+
236
+
237
+ class TqdmCallback(Callback):
238
+ """
239
+ A callback to display a progress bar using tqdm
240
+
241
+ Parameters
242
+ ----------
243
+ tqdm_kwargs : dict, (optional)
244
+ Any argument accepted by the tqdm constructor.
245
+ See the `tqdm doc <https://tqdm.github.io/docs/tqdm/#__init__>`_.
246
+ Will be forwarded to `tqdm_cls`.
247
+ tqdm_cls: (optional)
248
+ subclass of `tqdm.tqdm`. If not passed, it will default to `tqdm.tqdm`.
249
+
250
+ Examples
251
+ --------
252
+ >>> import fsspec
253
+ >>> from fsspec.callbacks import TqdmCallback
254
+ >>> fs = fsspec.filesystem("memory")
255
+ >>> path2distant_data = "/your-path"
256
+ >>> fs.upload(
257
+ ".",
258
+ path2distant_data,
259
+ recursive=True,
260
+ callback=TqdmCallback(),
261
+ )
262
+
263
+ You can forward args to tqdm using the ``tqdm_kwargs`` parameter.
264
+
265
+ >>> fs.upload(
266
+ ".",
267
+ path2distant_data,
268
+ recursive=True,
269
+ callback=TqdmCallback(tqdm_kwargs={"desc": "Your tqdm description"}),
270
+ )
271
+
272
+ You can also customize the progress bar by passing a subclass of `tqdm`.
273
+
274
+ .. code-block:: python
275
+
276
+ class TqdmFormat(tqdm):
277
+ '''Provides a `total_time` format parameter'''
278
+ @property
279
+ def format_dict(self):
280
+ d = super().format_dict
281
+ total_time = d["elapsed"] * (d["total"] or 0) / max(d["n"], 1)
282
+ d.update(total_time=self.format_interval(total_time) + " in total")
283
+ return d
284
+
285
+ >>> with TqdmCallback(
286
+ tqdm_kwargs={
287
+ "desc": "desc",
288
+ "bar_format": "{total_time}: {percentage:.0f}%|{bar}{r_bar}",
289
+ },
290
+ tqdm_cls=TqdmFormat,
291
+ ) as callback:
292
+ fs.upload(".", path2distant_data, recursive=True, callback=callback)
293
+ """
294
+
295
+ def __init__(self, tqdm_kwargs=None, *args, **kwargs):
296
+ try:
297
+ from tqdm import tqdm
298
+
299
+ except ImportError as exce:
300
+ raise ImportError(
301
+ "Using TqdmCallback requires tqdm to be installed"
302
+ ) from exce
303
+
304
+ self._tqdm_cls = kwargs.pop("tqdm_cls", tqdm)
305
+ self._tqdm_kwargs = tqdm_kwargs or {}
306
+ self.tqdm = None
307
+ super().__init__(*args, **kwargs)
308
+
309
+ def call(self, *args, **kwargs):
310
+ if self.tqdm is None:
311
+ self.tqdm = self._tqdm_cls(total=self.size, **self._tqdm_kwargs)
312
+ self.tqdm.total = self.size
313
+ self.tqdm.update(self.value - self.tqdm.n)
314
+
315
+ def close(self):
316
+ if self.tqdm is not None:
317
+ self.tqdm.close()
318
+ self.tqdm = None
319
+
320
+ def __del__(self):
321
+ return self.close()
322
+
323
+
324
+ DEFAULT_CALLBACK = _DEFAULT_CALLBACK = NoOpCallback()
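``DEFAULT_CALLBACK`` is the shared no-op instance substituted whenever ``callback=None`` is passed. Below is a minimal sketch of driving the plain ``Callback`` through a ``hooks`` dict, matching the ``f(size, value, **kwargs)`` hook signature described in the class docstring; the hook name "report" is arbitrary.

# Sketch: report progress through a user-supplied hook while iterating.
from fsspec.callbacks import Callback

def report(size, value, **kwargs):
    print(f"{value}/{size} done")

cb = Callback(size=3, hooks={"report": report})
for item in cb.wrap(["a", "b", "c"]):   # relative_update() fires per item
    pass
# prints "1/3 done", "2/3 done", "3/3 done"

# For real transfers, the same object can be passed to filesystem methods,
# e.g. fs.get_file(src, dst, callback=cb), or callback=TqdmCallback() for a
# progress bar (requires tqdm to be installed).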
venv/lib/python3.13/site-packages/fsspec/compression.py ADDED
@@ -0,0 +1,182 @@
1
+ """Helper functions for a standard streaming compression API"""
2
+
3
+ from zipfile import ZipFile
4
+
5
+ import fsspec.utils
6
+ from fsspec.spec import AbstractBufferedFile
7
+
8
+
9
+ def noop_file(file, mode, **kwargs):
10
+ return file
11
+
12
+
13
+ # TODO: files should also be available as contexts
14
+ # should be functions of the form func(infile, mode=, **kwargs) -> file-like
15
+ compr = {None: noop_file}
16
+
17
+
18
+ def register_compression(name, callback, extensions, force=False):
19
+ """Register an "inferable" file compression type.
20
+
21
+ Registers transparent file compression type for use with fsspec.open.
22
+ Compression can be specified by name in open, or "infer"-ed for any files
23
+ ending with the given extensions.
24
+
25
+ Args:
26
+ name: (str) The compression type name. Eg. "gzip".
27
+ callback: A callable of form (infile, mode, **kwargs) -> file-like.
28
+ Accepts an input file-like object, the target mode and kwargs.
29
+ Returns a wrapped file-like object.
30
+ extensions: (str, Iterable[str]) A file extension, or list of file
31
+ extensions for which to infer this compression scheme. Eg. "gz".
32
+ force: (bool) Force re-registration of compression type or extensions.
33
+
34
+ Raises:
35
+ ValueError: If name or extensions already registered, and not force.
36
+
37
+ """
38
+ if isinstance(extensions, str):
39
+ extensions = [extensions]
40
+
41
+ # Validate registration
42
+ if name in compr and not force:
43
+ raise ValueError(f"Duplicate compression registration: {name}")
44
+
45
+ for ext in extensions:
46
+ if ext in fsspec.utils.compressions and not force:
47
+ raise ValueError(f"Duplicate compression file extension: {ext} ({name})")
48
+
49
+ compr[name] = callback
50
+
51
+ for ext in extensions:
52
+ fsspec.utils.compressions[ext] = name
53
+
54
+
55
+ def unzip(infile, mode="rb", filename=None, **kwargs):
56
+ if "r" not in mode:
57
+ filename = filename or "file"
58
+ z = ZipFile(infile, mode="w", **kwargs)
59
+ fo = z.open(filename, mode="w")
60
+ fo.close = lambda closer=fo.close: closer() or z.close()
61
+ return fo
62
+ z = ZipFile(infile)
63
+ if filename is None:
64
+ filename = z.namelist()[0]
65
+ return z.open(filename, mode="r", **kwargs)
66
+
67
+
68
+ register_compression("zip", unzip, "zip")
69
+
70
+ try:
71
+ from bz2 import BZ2File
72
+ except ImportError:
73
+ pass
74
+ else:
75
+ register_compression("bz2", BZ2File, "bz2")
76
+
77
+ try: # pragma: no cover
78
+ from isal import igzip
79
+
80
+ def isal(infile, mode="rb", **kwargs):
81
+ return igzip.IGzipFile(fileobj=infile, mode=mode, **kwargs)
82
+
83
+ register_compression("gzip", isal, "gz")
84
+ except ImportError:
85
+ from gzip import GzipFile
86
+
87
+ register_compression(
88
+ "gzip", lambda f, **kwargs: GzipFile(fileobj=f, **kwargs), "gz"
89
+ )
90
+
91
+ try:
92
+ from lzma import LZMAFile
93
+
94
+ register_compression("lzma", LZMAFile, "lzma")
95
+ register_compression("xz", LZMAFile, "xz")
96
+ except ImportError:
97
+ pass
98
+
99
+ try:
100
+ import lzmaffi
101
+
102
+ register_compression("lzma", lzmaffi.LZMAFile, "lzma", force=True)
103
+ register_compression("xz", lzmaffi.LZMAFile, "xz", force=True)
104
+ except ImportError:
105
+ pass
106
+
107
+
108
+ class SnappyFile(AbstractBufferedFile):
109
+ def __init__(self, infile, mode, **kwargs):
110
+ import snappy
111
+
112
+ super().__init__(
113
+ fs=None, path="snappy", mode=mode.strip("b") + "b", size=999999999, **kwargs
114
+ )
115
+ self.infile = infile
116
+ if "r" in mode:
117
+ self.codec = snappy.StreamDecompressor()
118
+ else:
119
+ self.codec = snappy.StreamCompressor()
120
+
121
+ def _upload_chunk(self, final=False):
122
+ self.buffer.seek(0)
123
+ out = self.codec.add_chunk(self.buffer.read())
124
+ self.infile.write(out)
125
+ return True
126
+
127
+ def seek(self, loc, whence=0):
128
+ raise NotImplementedError("SnappyFile is not seekable")
129
+
130
+ def seekable(self):
131
+ return False
132
+
133
+ def _fetch_range(self, start, end):
134
+ """Get the specified set of bytes from remote"""
135
+ data = self.infile.read(end - start)
136
+ return self.codec.decompress(data)
137
+
138
+
139
+ try:
140
+ import snappy
141
+
142
+ snappy.compress(b"")
143
+ # Snappy may use the .sz file extension, but this is not part of the
144
+ # standard implementation.
145
+ register_compression("snappy", SnappyFile, [])
146
+
147
+ except (ImportError, NameError, AttributeError):
148
+ pass
149
+
150
+ try:
151
+ import lz4.frame
152
+
153
+ register_compression("lz4", lz4.frame.open, "lz4")
154
+ except ImportError:
155
+ pass
156
+
157
+ try:
158
+ # zstd in the standard library for python >= 3.14
159
+ from compression.zstd import ZstdFile
160
+
161
+ register_compression("zstd", ZstdFile, "zst")
162
+
163
+ except ImportError:
164
+ try:
165
+ import zstandard as zstd
166
+
167
+ def zstandard_file(infile, mode="rb"):
168
+ if "r" in mode:
169
+ cctx = zstd.ZstdDecompressor()
170
+ return cctx.stream_reader(infile)
171
+ else:
172
+ cctx = zstd.ZstdCompressor(level=10)
173
+ return cctx.stream_writer(infile)
174
+
175
+ register_compression("zstd", zstandard_file, "zst")
176
+ except ImportError:
177
+ pass
178
+
179
+
180
+ def available_compressions():
181
+ """Return a list of the implemented compressions."""
182
+ return list(compr)
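Besides the codecs registered above, callers can add their own with ``register_compression`` and have them picked up by ``compression="infer"``. A hedged sketch follows; the codec name "gzip-alias" and the ".gzz" suffix are invented for illustration, and the underlying codec is simply the standard-library ``gzip`` module.

# Sketch: map a made-up ".gzz" suffix onto standard-library gzip so that
# compression="infer" picks it up. Names and extension are illustrative only.
import gzip

import fsspec
from fsspec.compression import available_compressions, register_compression

register_compression(
    "gzip-alias",                                   # hypothetical codec name
    lambda f, mode="rb", **kw: gzip.GzipFile(fileobj=f, mode=mode, **kw),
    "gzz",
)
print(available_compressions())  # now includes "gzip-alias"

with fsspec.open("memory://demo.csv.gzz", "wb", compression="infer") as f:
    f.write(b"a,b\n1,2\n")
with fsspec.open("memory://demo.csv.gzz", "rb", compression="infer") as f:
    print(f.read())  # b'a,b\n1,2\n'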
venv/lib/python3.13/site-packages/fsspec/config.py ADDED
@@ -0,0 +1,131 @@
1
+ from __future__ import annotations
2
+
3
+ import configparser
4
+ import json
5
+ import os
6
+ import warnings
7
+ from typing import Any
8
+
9
+ conf: dict[str, dict[str, Any]] = {}
10
+ default_conf_dir = os.path.join(os.path.expanduser("~"), ".config/fsspec")
11
+ conf_dir = os.environ.get("FSSPEC_CONFIG_DIR", default_conf_dir)
12
+
13
+
14
+ def set_conf_env(conf_dict, envdict=os.environ):
15
+ """Set config values from environment variables
16
+
17
+ Looks for variables of the form ``FSSPEC_<protocol>`` and
18
+ ``FSSPEC_<protocol>_<kwarg>``. For ``FSSPEC_<protocol>`` the value is parsed
19
+ as a json dictionary and used to ``update`` the config of the
20
+ corresponding protocol. For ``FSSPEC_<protocol>_<kwarg>`` there is no
21
+ attempt to convert the string value, but the kwarg keys will be lower-cased.
22
+
23
+ The ``FSSPEC_<protocol>_<kwarg>`` variables are applied after the
24
+ ``FSSPEC_<protocol>`` ones.
25
+
26
+ Parameters
27
+ ----------
28
+ conf_dict : dict(str, dict)
29
+ This dict will be mutated
30
+ envdict : dict-like(str, str)
31
+ Source for the values - usually the real environment
32
+ """
33
+ kwarg_keys = []
34
+ for key in envdict:
35
+ if key.startswith("FSSPEC_") and len(key) > 7 and key[7] != "_":
36
+ if key.count("_") > 1:
37
+ kwarg_keys.append(key)
38
+ continue
39
+ try:
40
+ value = json.loads(envdict[key])
41
+ except json.decoder.JSONDecodeError as ex:
42
+ warnings.warn(
43
+ f"Ignoring environment variable {key} due to a parse failure: {ex}"
44
+ )
45
+ else:
46
+ if isinstance(value, dict):
47
+ _, proto = key.split("_", 1)
48
+ conf_dict.setdefault(proto.lower(), {}).update(value)
49
+ else:
50
+ warnings.warn(
51
+ f"Ignoring environment variable {key} due to not being a dict:"
52
+ f" {type(value)}"
53
+ )
54
+ elif key.startswith("FSSPEC"):
55
+ warnings.warn(
56
+ f"Ignoring environment variable {key} due to having an unexpected name"
57
+ )
58
+
59
+ for key in kwarg_keys:
60
+ _, proto, kwarg = key.split("_", 2)
61
+ conf_dict.setdefault(proto.lower(), {})[kwarg.lower()] = envdict[key]
62
+
63
+
64
+ def set_conf_files(cdir, conf_dict):
65
+ """Set config values from files
66
+
67
+ Scans for INI and JSON files in the given directory, and uses their
68
+ contents to set the config. In case of repeated values, later values
69
+ win.
70
+
71
+ In the case of INI files, all values are strings, and these will not
72
+ be converted.
73
+
74
+ Parameters
75
+ ----------
76
+ cdir : str
77
+ Directory to search
78
+ conf_dict : dict(str, dict)
79
+ This dict will be mutated
80
+ """
81
+ if not os.path.isdir(cdir):
82
+ return
83
+ allfiles = sorted(os.listdir(cdir))
84
+ for fn in allfiles:
85
+ if fn.endswith(".ini"):
86
+ ini = configparser.ConfigParser()
87
+ ini.read(os.path.join(cdir, fn))
88
+ for key in ini:
89
+ if key == "DEFAULT":
90
+ continue
91
+ conf_dict.setdefault(key, {}).update(dict(ini[key]))
92
+ if fn.endswith(".json"):
93
+ with open(os.path.join(cdir, fn)) as f:
94
+ js = json.load(f)
95
+ for key in js:
96
+ conf_dict.setdefault(key, {}).update(dict(js[key]))
97
+
98
+
99
+ def apply_config(cls, kwargs, conf_dict=None):
100
+ """Supply default values for kwargs when instantiating class
101
+
102
+ Augments the passed kwargs, by finding entries in the config dict
103
+ which match the classes ``.protocol`` attribute (one or more str)
104
+
105
+ Parameters
106
+ ----------
107
+ cls : file system implementation
108
+ kwargs : dict
109
+ conf_dict : dict of dict
110
+ Typically this is the global configuration
111
+
112
+ Returns
113
+ -------
114
+ dict : the modified set of kwargs
115
+ """
116
+ if conf_dict is None:
117
+ conf_dict = conf
118
+ protos = cls.protocol if isinstance(cls.protocol, (tuple, list)) else [cls.protocol]
119
+ kw = {}
120
+ for proto in protos:
121
+ # default kwargs from the current state of the config
122
+ if proto in conf_dict:
123
+ kw.update(conf_dict[proto])
124
+ # explicit kwargs always win
125
+ kw.update(**kwargs)
126
+ kwargs = kw
127
+ return kwargs
128
+
129
+
130
+ set_conf_files(conf_dir, conf)
131
+ set_conf_env(conf)
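The two calls above run at import time, seeding ``conf`` first from config files and then from the environment. A small sketch of the two environment-variable forms the parser accepts follows; the ``gcs`` protocol and its values are placeholders.

# Sketch of how the environment variables map into `conf`.
import json
import os

os.environ["FSSPEC_GCS"] = json.dumps({"token": "anon"})   # parsed as a JSON dict
os.environ["FSSPEC_GCS_PROJECT"] = "my-project"            # kept as a plain string

from fsspec.config import conf, set_conf_env

set_conf_env(conf)          # re-run here for the demo; normally automatic at import
print(conf["gcs"])          # {'token': 'anon', 'project': 'my-project'}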
venv/lib/python3.13/site-packages/fsspec/conftest.py ADDED
@@ -0,0 +1,55 @@
1
+ import os
2
+ import shutil
3
+ import subprocess
4
+ import sys
5
+ import time
6
+
7
+ import pytest
8
+
9
+ import fsspec
10
+ from fsspec.implementations.cached import CachingFileSystem
11
+
12
+
13
+ @pytest.fixture()
14
+ def m():
15
+ """
16
+ Fixture providing a memory filesystem.
17
+ """
18
+ m = fsspec.filesystem("memory")
19
+ m.store.clear()
20
+ m.pseudo_dirs.clear()
21
+ m.pseudo_dirs.append("")
22
+ try:
23
+ yield m
24
+ finally:
25
+ m.store.clear()
26
+ m.pseudo_dirs.clear()
27
+ m.pseudo_dirs.append("")
28
+
29
+
30
+ @pytest.fixture
31
+ def ftp_writable(tmpdir):
32
+ """
33
+ Fixture providing a writable FTP filesystem.
34
+ """
35
+ pytest.importorskip("pyftpdlib")
36
+ from fsspec.implementations.ftp import FTPFileSystem
37
+
38
+ FTPFileSystem.clear_instance_cache() # remove lingering connections
39
+ CachingFileSystem.clear_instance_cache()
40
+ d = str(tmpdir)
41
+ with open(os.path.join(d, "out"), "wb") as f:
42
+ f.write(b"hello" * 10000)
43
+ P = subprocess.Popen(
44
+ [sys.executable, "-m", "pyftpdlib", "-d", d, "-u", "user", "-P", "pass", "-w"]
45
+ )
46
+ try:
47
+ time.sleep(1)
48
+ yield "localhost", 2121, "user", "pass"
49
+ finally:
50
+ P.terminate()
51
+ P.wait()
52
+ try:
53
+ shutil.rmtree(tmpdir)
54
+ except Exception:
55
+ pass
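These fixtures are consumed by fsspec's own test suite. Below is a minimal sketch of a test that uses the ``m`` memory-filesystem fixture; the test name and file contents are illustrative only.

# Hypothetical test module relying on the `m` fixture defined above.
def test_roundtrip(m):
    m.pipe_file("/a/b.txt", b"hello")
    assert m.cat_file("/a/b.txt") == b"hello"
    assert not m.exists("/a/missing.txt")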
venv/lib/python3.13/site-packages/fsspec/core.py ADDED
@@ -0,0 +1,743 @@
1
+ from __future__ import annotations
2
+
3
+ import io
4
+ import logging
5
+ import os
6
+ import re
7
+ from glob import has_magic
8
+ from pathlib import Path
9
+
10
+ # for backwards compat, we export cache things from here too
11
+ from fsspec.caching import ( # noqa: F401
12
+ BaseCache,
13
+ BlockCache,
14
+ BytesCache,
15
+ MMapCache,
16
+ ReadAheadCache,
17
+ caches,
18
+ )
19
+ from fsspec.compression import compr
20
+ from fsspec.config import conf
21
+ from fsspec.registry import filesystem, get_filesystem_class
22
+ from fsspec.utils import (
23
+ _unstrip_protocol,
24
+ build_name_function,
25
+ infer_compression,
26
+ stringify_path,
27
+ )
28
+
29
+ logger = logging.getLogger("fsspec")
30
+
31
+
32
+ class OpenFile:
33
+ """
34
+ File-like object to be used in a context
35
+
36
+ Can layer (buffered) text-mode and compression over any file-system, which
37
+ are typically binary-only.
38
+
39
+ These instances are safe to serialize, as the low-level file object
40
+ is not created until invoked using ``with``.
41
+
42
+ Parameters
43
+ ----------
44
+ fs: FileSystem
45
+ The file system to use for opening the file. Should be a subclass or duck-type
46
+ with ``fsspec.spec.AbstractFileSystem``
47
+ path: str
48
+ Location to open
49
+ mode: str like 'rb', optional
50
+ Mode of the opened file
51
+ compression: str or None, optional
52
+ Compression to apply
53
+ encoding: str or None, optional
54
+ The encoding to use if opened in text mode.
55
+ errors: str or None, optional
56
+ How to handle encoding errors if opened in text mode.
57
+ newline: None or str
58
+ Passed to TextIOWrapper in text mode, how to handle line endings.
59
+ autoopen: bool
60
+ If True, calls open() immediately. Mostly used by pickle
61
+ pos: int
62
+ If given and autoopen is True, seek to this location immediately
63
+ """
64
+
65
+ def __init__(
66
+ self,
67
+ fs,
68
+ path,
69
+ mode="rb",
70
+ compression=None,
71
+ encoding=None,
72
+ errors=None,
73
+ newline=None,
74
+ ):
75
+ self.fs = fs
76
+ self.path = path
77
+ self.mode = mode
78
+ self.compression = get_compression(path, compression)
79
+ self.encoding = encoding
80
+ self.errors = errors
81
+ self.newline = newline
82
+ self.fobjects = []
83
+
84
+ def __reduce__(self):
85
+ return (
86
+ OpenFile,
87
+ (
88
+ self.fs,
89
+ self.path,
90
+ self.mode,
91
+ self.compression,
92
+ self.encoding,
93
+ self.errors,
94
+ self.newline,
95
+ ),
96
+ )
97
+
98
+ def __repr__(self):
99
+ return f"<OpenFile '{self.path}'>"
100
+
101
+ def __enter__(self):
102
+ mode = self.mode.replace("t", "").replace("b", "") + "b"
103
+
104
+ try:
105
+ f = self.fs.open(self.path, mode=mode)
106
+ except FileNotFoundError as e:
107
+ if has_magic(self.path):
108
+ raise FileNotFoundError(
109
+ "%s not found. The URL contains glob characters: you maybe needed\n"
110
+ "to pass expand=True in fsspec.open() or the storage_options of \n"
111
+ "your library. You can also set the config value 'open_expand'\n"
112
+ "before import, or fsspec.core.DEFAULT_EXPAND at runtime, to True.",
113
+ self.path,
114
+ ) from e
115
+ raise
116
+
117
+ self.fobjects = [f]
118
+
119
+ if self.compression is not None:
120
+ compress = compr[self.compression]
121
+ f = compress(f, mode=mode[0])
122
+ self.fobjects.append(f)
123
+
124
+ if "b" not in self.mode:
125
+ # assume, for example, that 'r' is equivalent to 'rt' as in builtin
126
+ f = PickleableTextIOWrapper(
127
+ f, encoding=self.encoding, errors=self.errors, newline=self.newline
128
+ )
129
+ self.fobjects.append(f)
130
+
131
+ return self.fobjects[-1]
132
+
133
+ def __exit__(self, *args):
134
+ self.close()
135
+
136
+ @property
137
+ def full_name(self):
138
+ return _unstrip_protocol(self.path, self.fs)
139
+
140
+ def open(self):
141
+ """Materialise this as a real open file without context
142
+
143
+ The OpenFile object should be explicitly closed to avoid enclosed file
144
+ instances persisting. You must, therefore, keep a reference to the OpenFile
145
+ during the life of the file-like it generates.
146
+ """
147
+ return self.__enter__()
148
+
149
+ def close(self):
150
+ """Close all encapsulated file objects"""
151
+ for f in reversed(self.fobjects):
152
+ if "r" not in self.mode and not f.closed:
153
+ f.flush()
154
+ f.close()
155
+ self.fobjects.clear()
156
+
157
+
158
+ class OpenFiles(list):
159
+ """List of OpenFile instances
160
+
161
+ Can be used in a single context, which opens and closes all of the
162
+ contained files. Normal list access to get the elements works as
163
+ normal.
164
+
165
+ A special case is made for caching filesystems - the files will
166
+ be down/uploaded together at the start or end of the context, and
167
+ this may happen concurrently, if the target filesystem supports it.
168
+ """
169
+
170
+ def __init__(self, *args, mode="rb", fs=None):
171
+ self.mode = mode
172
+ self.fs = fs
173
+ self.files = []
174
+ super().__init__(*args)
175
+
176
+ def __enter__(self):
177
+ if self.fs is None:
178
+ raise ValueError("Context has already been used")
179
+
180
+ fs = self.fs
181
+ while True:
182
+ if hasattr(fs, "open_many"):
183
+ # check for concurrent cache download; or set up for upload
184
+ self.files = fs.open_many(self)
185
+ return self.files
186
+ if hasattr(fs, "fs") and fs.fs is not None:
187
+ fs = fs.fs
188
+ else:
189
+ break
190
+ return [s.__enter__() for s in self]
191
+
192
+ def __exit__(self, *args):
193
+ fs = self.fs
194
+ [s.__exit__(*args) for s in self]
195
+ if "r" not in self.mode:
196
+ while True:
197
+ if hasattr(fs, "open_many"):
198
+ # check for concurrent cache upload
199
+ fs.commit_many(self.files)
200
+ return
201
+ if hasattr(fs, "fs") and fs.fs is not None:
202
+ fs = fs.fs
203
+ else:
204
+ break
205
+
206
+ def __getitem__(self, item):
207
+ out = super().__getitem__(item)
208
+ if isinstance(item, slice):
209
+ return OpenFiles(out, mode=self.mode, fs=self.fs)
210
+ return out
211
+
212
+ def __repr__(self):
213
+ return f"<List of {len(self)} OpenFile instances>"
214
+
215
+
216
+ def open_files(
217
+ urlpath,
218
+ mode="rb",
219
+ compression=None,
220
+ encoding="utf8",
221
+ errors=None,
222
+ name_function=None,
223
+ num=1,
224
+ protocol=None,
225
+ newline=None,
226
+ auto_mkdir=True,
227
+ expand=True,
228
+ **kwargs,
229
+ ):
230
+ """Given a path or paths, return a list of ``OpenFile`` objects.
231
+
232
+ For writing, a str path must contain the "*" character, which will be filled
233
+ in by increasing numbers, e.g., "part*" -> "part1", "part2" if num=2.
234
+
235
+ For either reading or writing, can instead provide explicit list of paths.
236
+
237
+ Parameters
238
+ ----------
239
+ urlpath: string or list
240
+ Absolute or relative filepath(s). Prefix with a protocol like ``s3://``
241
+ to read from alternative filesystems. To read from multiple files you
242
+ can pass a globstring or a list of paths, with the caveat that they
243
+ must all have the same protocol.
244
+ mode: 'rb', 'wt', etc.
245
+ compression: string or None
246
+ If given, open file using compression codec. Can either be a compression
247
+ name (a key in ``fsspec.compression.compr``) or "infer" to guess the
248
+ compression from the filename suffix.
249
+ encoding: str
250
+ For text mode only
251
+ errors: None or str
252
+ Passed to TextIOWrapper in text mode
253
+ name_function: function or None
254
+ if opening a set of files for writing, those files do not yet exist,
255
+ so we need to generate their names by formatting the urlpath for
256
+ each sequence number
257
+ num: int [1]
258
+ if writing mode, number of files we expect to create (passed to
259
+ name_function)
260
+ protocol: str or None
261
+ If given, overrides the protocol found in the URL.
262
+ newline: bytes or None
263
+ Used for line terminator in text mode. If None, uses system default;
264
+ if blank, uses no translation.
265
+ auto_mkdir: bool (True)
266
+ If in write mode, this will ensure the target directory exists before
267
+ writing, by calling ``fs.mkdirs(exist_ok=True)``.
268
+ expand: bool
269
+ **kwargs: dict
270
+ Extra options that make sense to a particular storage connection, e.g.
271
+ host, port, username, password, etc.
272
+
273
+ Examples
274
+ --------
275
+ >>> files = open_files('2015-*-*.csv') # doctest: +SKIP
276
+ >>> files = open_files(
277
+ ... 's3://bucket/2015-*-*.csv.gz', compression='gzip'
278
+ ... ) # doctest: +SKIP
279
+
280
+ Returns
281
+ -------
282
+ An ``OpenFiles`` instance, which is a list of ``OpenFile`` objects that can
283
+ be used as a single context
284
+
285
+ Notes
286
+ -----
287
+ For a full list of the available protocols and the implementations that
288
+ they map across to see the latest online documentation:
289
+
290
+ - For implementations built into ``fsspec`` see
291
+ https://filesystem-spec.readthedocs.io/en/latest/api.html#built-in-implementations
292
+ - For implementations in separate packages see
293
+ https://filesystem-spec.readthedocs.io/en/latest/api.html#other-known-implementations
294
+ """
295
+ fs, fs_token, paths = get_fs_token_paths(
296
+ urlpath,
297
+ mode,
298
+ num=num,
299
+ name_function=name_function,
300
+ storage_options=kwargs,
301
+ protocol=protocol,
302
+ expand=expand,
303
+ )
304
+ if fs.protocol == "file":
305
+ fs.auto_mkdir = auto_mkdir
306
+ elif "r" not in mode and auto_mkdir:
307
+ parents = {fs._parent(path) for path in paths}
308
+ for parent in parents:
309
+ try:
310
+ fs.makedirs(parent, exist_ok=True)
311
+ except PermissionError:
312
+ pass
313
+ return OpenFiles(
314
+ [
315
+ OpenFile(
316
+ fs,
317
+ path,
318
+ mode=mode,
319
+ compression=compression,
320
+ encoding=encoding,
321
+ errors=errors,
322
+ newline=newline,
323
+ )
324
+ for path in paths
325
+ ],
326
+ mode=mode,
327
+ fs=fs,
328
+ )
329
+
330
+
331
+ def _un_chain(path, kwargs):
332
+ # Avoid a circular import
333
+ from fsspec.implementations.cached import CachingFileSystem
334
+
335
+ if "::" in path:
336
+ x = re.compile(".*[^a-z]+.*") # test for non protocol-like single word
337
+ bits = []
338
+ for p in path.split("::"):
339
+ if "://" in p or x.match(p):
340
+ bits.append(p)
341
+ else:
342
+ bits.append(p + "://")
343
+ else:
344
+ bits = [path]
345
+ # [[url, protocol, kwargs], ...]
346
+ out = []
347
+ previous_bit = None
348
+ kwargs = kwargs.copy()
349
+ for bit in reversed(bits):
350
+ protocol = kwargs.pop("protocol", None) or split_protocol(bit)[0] or "file"
351
+ cls = get_filesystem_class(protocol)
352
+ extra_kwargs = cls._get_kwargs_from_urls(bit)
353
+ kws = kwargs.pop(protocol, {})
354
+ if bit is bits[0]:
355
+ kws.update(kwargs)
356
+ kw = dict(
357
+ **{k: v for k, v in extra_kwargs.items() if k not in kws or v != kws[k]},
358
+ **kws,
359
+ )
360
+ bit = cls._strip_protocol(bit)
361
+ if "target_protocol" not in kw and issubclass(cls, CachingFileSystem):
362
+ bit = previous_bit
363
+ out.append((bit, protocol, kw))
364
+ previous_bit = bit
365
+ out.reverse()
366
+ return out
367
+
368
+
369
+ def url_to_fs(url, **kwargs):
370
+ """
371
+ Turn fully-qualified and potentially chained URL into filesystem instance
372
+
373
+ Parameters
374
+ ----------
375
+ url : str
376
+ The fsspec-compatible URL
377
+ **kwargs: dict
378
+ Extra options that make sense to a particular storage connection, e.g.
379
+ host, port, username, password, etc.
380
+
381
+ Returns
382
+ -------
383
+ filesystem : FileSystem
384
+ The new filesystem discovered from ``url`` and created with
385
+ ``**kwargs``.
386
+ urlpath : str
387
+ The file-systems-specific URL for ``url``.
388
+ """
389
+ url = stringify_path(url)
390
+ # non-FS arguments that appear in fsspec.open()
391
+ # inspect could keep this in sync with open()'s signature
392
+ known_kwargs = {
393
+ "compression",
394
+ "encoding",
395
+ "errors",
396
+ "expand",
397
+ "mode",
398
+ "name_function",
399
+ "newline",
400
+ "num",
401
+ }
402
+ kwargs = {k: v for k, v in kwargs.items() if k not in known_kwargs}
403
+ chain = _un_chain(url, kwargs)
404
+ inkwargs = {}
405
+ # Reverse iterate the chain, creating a nested target_* structure
406
+ for i, ch in enumerate(reversed(chain)):
407
+ urls, protocol, kw = ch
408
+ if i == len(chain) - 1:
409
+ inkwargs = dict(**kw, **inkwargs)
410
+ continue
411
+ inkwargs["target_options"] = dict(**kw, **inkwargs)
412
+ inkwargs["target_protocol"] = protocol
413
+ inkwargs["fo"] = urls
414
+ urlpath, protocol, _ = chain[0]
415
+ fs = filesystem(protocol, **inkwargs)
416
+ return fs, urlpath
417
+
418
+
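``url_to_fs`` (together with ``_un_chain``) is what turns a ``::``-chained URL into nested ``target_protocol``/``target_options``/``fo`` keyword arguments. The following is a hedged sketch of the effect, reading a member of a zip archive that itself lives on the in-memory filesystem; the archive path and member name are invented for the example.

# Sketch: chained URL resolving a zip member inside a file on the memory
# filesystem. Uses only the zip implementation bundled with fsspec.
import io
import zipfile

import fsspec
from fsspec.core import url_to_fs

# put a small zip archive onto the memory filesystem first
buf = io.BytesIO()
with zipfile.ZipFile(buf, "w") as z:
    z.writestr("inner.csv", "a,b\n1,2\n")
fsspec.filesystem("memory").pipe_file("/archive.zip", buf.getvalue())

# the chained URL resolves to ZipFileSystem(fo="/archive.zip", target_protocol="memory")
fs, path = url_to_fs("zip://inner.csv::memory://archive.zip")
with fs.open(path, "rb") as f:
    print(f.read())   # b'a,b\n1,2\n'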
419
+ DEFAULT_EXPAND = conf.get("open_expand", False)
420
+
421
+
422
+ def open(
423
+ urlpath,
424
+ mode="rb",
425
+ compression=None,
426
+ encoding="utf8",
427
+ errors=None,
428
+ protocol=None,
429
+ newline=None,
430
+ expand=None,
431
+ **kwargs,
432
+ ):
433
+ """Given a path or paths, return one ``OpenFile`` object.
434
+
435
+ Parameters
436
+ ----------
437
+ urlpath: string or list
438
+ Absolute or relative filepath. Prefix with a protocol like ``s3://``
439
+ to read from alternative filesystems. Should not include glob
440
+ character(s).
441
+ mode: 'rb', 'wt', etc.
442
+ compression: string or None
443
+ If given, open file using compression codec. Can either be a compression
444
+ name (a key in ``fsspec.compression.compr``) or "infer" to guess the
445
+ compression from the filename suffix.
446
+ encoding: str
447
+ For text mode only
448
+ errors: None or str
449
+ Passed to TextIOWrapper in text mode
450
+ protocol: str or None
451
+ If given, overrides the protocol found in the URL.
452
+ newline: bytes or None
453
+ Used for line terminator in text mode. If None, uses system default;
454
+ if blank, uses no translation.
455
+ expand: bool or None
456
+ Whether to regard file paths containing special glob characters as needing
457
+ expansion (finding the first match) or absolute. Setting False allows using
458
+ paths which do embed such characters. If None (default), this argument
459
+ takes its value from the DEFAULT_EXPAND module variable, which takes
460
+ its initial value from the "open_expand" config value at startup, which will
461
+ be False if not set.
462
+ **kwargs: dict
463
+ Extra options that make sense to a particular storage connection, e.g.
464
+ host, port, username, password, etc.
465
+
466
+ Examples
467
+ --------
468
+ >>> openfile = open('2015-01-01.csv') # doctest: +SKIP
469
+ >>> openfile = open(
470
+ ... 's3://bucket/2015-01-01.csv.gz', compression='gzip'
471
+ ... ) # doctest: +SKIP
472
+ >>> with openfile as f:
473
+ ... df = pd.read_csv(f) # doctest: +SKIP
474
+ ...
475
+
476
+ Returns
477
+ -------
478
+ ``OpenFile`` object.
479
+
480
+ Notes
481
+ -----
482
+ For a full list of the available protocols and the implementations that
483
+ they map across to see the latest online documentation:
484
+
485
+ - For implementations built into ``fsspec`` see
486
+ https://filesystem-spec.readthedocs.io/en/latest/api.html#built-in-implementations
487
+ - For implementations in separate packages see
488
+ https://filesystem-spec.readthedocs.io/en/latest/api.html#other-known-implementations
489
+ """
490
+ expand = DEFAULT_EXPAND if expand is None else expand
491
+ out = open_files(
492
+ urlpath=[urlpath],
493
+ mode=mode,
494
+ compression=compression,
495
+ encoding=encoding,
496
+ errors=errors,
497
+ protocol=protocol,
498
+ newline=newline,
499
+ expand=expand,
500
+ **kwargs,
501
+ )
502
+ if not out:
503
+ raise FileNotFoundError(urlpath)
504
+ return out[0]
505
+
506
+
507
+ def open_local(
508
+ url: str | list[str] | Path | list[Path],
509
+ mode: str = "rb",
510
+ **storage_options: dict,
511
+ ) -> str | list[str]:
512
+ """Open file(s) which can be resolved to local
513
+
514
+ For files which either are local, or get downloaded upon open
515
+ (e.g., by file caching)
516
+
517
+ Parameters
518
+ ----------
519
+ url: str or list(str)
520
+ mode: str
521
+ Must be read mode
522
+ storage_options:
523
+ passed on to FS for or used by open_files (e.g., compression)
524
+ """
525
+ if "r" not in mode:
526
+ raise ValueError("Can only ensure local files when reading")
527
+ of = open_files(url, mode=mode, **storage_options)
528
+ if not getattr(of[0].fs, "local_file", False):
529
+ raise ValueError(
530
+ "open_local can only be used on a filesystem which"
531
+ " has attribute local_file=True"
532
+ )
533
+ with of as files:
534
+ paths = [f.name for f in files]
535
+ if (isinstance(url, str) and not has_magic(url)) or isinstance(url, Path):
536
+ return paths[0]
537
+ return paths
538
+
539
+
540
+ def get_compression(urlpath, compression):
541
+ if compression == "infer":
542
+ compression = infer_compression(urlpath)
543
+ if compression is not None and compression not in compr:
544
+ raise ValueError(f"Compression type {compression} not supported")
545
+ return compression
546
+
547
+
548
+ def split_protocol(urlpath):
549
+ """Return protocol, path pair"""
550
+ urlpath = stringify_path(urlpath)
551
+ if "://" in urlpath:
552
+ protocol, path = urlpath.split("://", 1)
553
+ if len(protocol) > 1:
554
+ # excludes Windows paths
555
+ return protocol, path
556
+ if urlpath.startswith("data:"):
557
+ return urlpath.split(":", 1)
558
+ return None, urlpath
559
+
560
+
561
+ def strip_protocol(urlpath):
562
+ """Return only path part of full URL, according to appropriate backend"""
563
+ protocol, _ = split_protocol(urlpath)
564
+ cls = get_filesystem_class(protocol)
565
+ return cls._strip_protocol(urlpath)
566
+
567
+
568
+ def expand_paths_if_needed(paths, mode, num, fs, name_function):
569
+ """Expand paths if they have a ``*`` in them (write mode) or any of ``*?[]``
570
+ in them (read mode).
571
+
572
+ :param paths: list of paths
573
+ mode: str
574
+ Mode in which to open files.
575
+ num: int
576
+ If opening in writing mode, number of files we expect to create.
577
+ fs: filesystem object
578
+ name_function: callable
579
+ If opening in writing mode, this callable is used to generate path
580
+ names. Names are generated for each partition by
581
+ ``urlpath.replace('*', name_function(partition_index))``.
582
+ :return: list of paths
583
+ """
584
+ expanded_paths = []
585
+ paths = list(paths)
586
+
587
+ if "w" in mode: # read mode
588
+ if sum(1 for p in paths if "*" in p) > 1:
589
+ raise ValueError(
590
+ "When writing data, only one filename mask can be specified."
591
+ )
592
+ num = max(num, len(paths))
593
+
594
+ for curr_path in paths:
595
+ if "*" in curr_path:
596
+ # expand using name_function
597
+ expanded_paths.extend(_expand_paths(curr_path, name_function, num))
598
+ else:
599
+ expanded_paths.append(curr_path)
600
+ # if we generated more paths than asked for, trim the list
601
+ if len(expanded_paths) > num:
602
+ expanded_paths = expanded_paths[:num]
603
+
604
+ else: # read mode
605
+ for curr_path in paths:
606
+ if has_magic(curr_path):
607
+ # expand using glob
608
+ expanded_paths.extend(fs.glob(curr_path))
609
+ else:
610
+ expanded_paths.append(curr_path)
611
+
612
+ return expanded_paths
613
+
614
+
615
+ def get_fs_token_paths(
616
+ urlpath,
617
+ mode="rb",
618
+ num=1,
619
+ name_function=None,
620
+ storage_options=None,
621
+ protocol=None,
622
+ expand=True,
623
+ ):
624
+ """Filesystem, deterministic token, and paths from a urlpath and options.
625
+
626
+ Parameters
627
+ ----------
628
+ urlpath: string or iterable
629
+ Absolute or relative filepath, URL (may include protocols like
630
+ ``s3://``), or globstring pointing to data.
631
+ mode: str, optional
632
+ Mode in which to open files.
633
+ num: int, optional
634
+ If opening in writing mode, number of files we expect to create.
635
+ name_function: callable, optional
636
+ If opening in writing mode, this callable is used to generate path
637
+ names. Names are generated for each partition by
638
+ ``urlpath.replace('*', name_function(partition_index))``.
639
+ storage_options: dict, optional
640
+ Additional keywords to pass to the filesystem class.
641
+ protocol: str or None
642
+ To override the protocol specifier in the URL
643
+ expand: bool
644
+ Expand string paths for writing, assuming the path is a directory
645
+ """
646
+ if isinstance(urlpath, (list, tuple, set)):
647
+ if not urlpath:
648
+ raise ValueError("empty urlpath sequence")
649
+ urlpath0 = stringify_path(next(iter(urlpath)))
650
+ else:
651
+ urlpath0 = stringify_path(urlpath)
652
+ storage_options = storage_options or {}
653
+ if protocol:
654
+ storage_options["protocol"] = protocol
655
+ chain = _un_chain(urlpath0, storage_options or {})
656
+ inkwargs = {}
657
+ # Reverse iterate the chain, creating a nested target_* structure
658
+ for i, ch in enumerate(reversed(chain)):
659
+ urls, nested_protocol, kw = ch
660
+ if i == len(chain) - 1:
661
+ inkwargs = dict(**kw, **inkwargs)
662
+ continue
663
+ inkwargs["target_options"] = dict(**kw, **inkwargs)
664
+ inkwargs["target_protocol"] = nested_protocol
665
+ inkwargs["fo"] = urls
666
+ paths, protocol, _ = chain[0]
667
+ fs = filesystem(protocol, **inkwargs)
668
+ if isinstance(urlpath, (list, tuple, set)):
669
+ pchains = [
670
+ _un_chain(stringify_path(u), storage_options or {})[0] for u in urlpath
671
+ ]
672
+ if len({pc[1] for pc in pchains}) > 1:
673
+ raise ValueError(f"Protocol mismatch getting fs from {urlpath}")
674
+ paths = [pc[0] for pc in pchains]
675
+ else:
676
+ paths = fs._strip_protocol(paths)
677
+ if isinstance(paths, (list, tuple, set)):
678
+ if expand:
679
+ paths = expand_paths_if_needed(paths, mode, num, fs, name_function)
680
+ elif not isinstance(paths, list):
681
+ paths = list(paths)
682
+ else:
683
+ if ("w" in mode or "x" in mode) and expand:
684
+ paths = _expand_paths(paths, name_function, num)
685
+ elif "*" in paths:
686
+ paths = [f for f in sorted(fs.glob(paths)) if not fs.isdir(f)]
687
+ else:
688
+ paths = [paths]
689
+
690
+ return fs, fs._fs_token, paths
691
+
692
+
693
+ def _expand_paths(path, name_function, num):
694
+ if isinstance(path, str):
695
+ if path.count("*") > 1:
696
+ raise ValueError("Output path spec must contain exactly one '*'.")
697
+ elif "*" not in path:
698
+ path = os.path.join(path, "*.part")
699
+
700
+ if name_function is None:
701
+ name_function = build_name_function(num - 1)
702
+
703
+ paths = [path.replace("*", name_function(i)) for i in range(num)]
704
+ if paths != sorted(paths):
705
+ logger.warning(
706
+ "In order to preserve order between partitions"
707
+ " paths created with ``name_function`` should "
708
+ "sort to partition order"
709
+ )
710
+ elif isinstance(path, (tuple, list)):
711
+ assert len(path) == num
712
+ paths = list(path)
713
+ else:
714
+ raise ValueError(
715
+ "Path should be either\n"
716
+ "1. A list of paths: ['foo.json', 'bar.json', ...]\n"
717
+ "2. A directory: 'foo/'\n"
718
+ "3. A path with a '*' in it: 'foo.*.json'"
719
+ )
720
+ return paths
721
+
722
+
723
+ class PickleableTextIOWrapper(io.TextIOWrapper):
724
+ """TextIOWrapper cannot be pickled. This solves it.
725
+
726
+ Requires that ``buffer`` be pickleable, which all instances of
727
+ AbstractBufferedFile are.
728
+ """
729
+
730
+ def __init__(
731
+ self,
732
+ buffer,
733
+ encoding=None,
734
+ errors=None,
735
+ newline=None,
736
+ line_buffering=False,
737
+ write_through=False,
738
+ ):
739
+ self.args = buffer, encoding, errors, newline, line_buffering, write_through
740
+ super().__init__(*self.args)
741
+
742
+ def __reduce__(self):
743
+ return PickleableTextIOWrapper, self.args
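A minimal usage sketch for the helpers above (assumptions: fsspec is importable and the glob below is illustrative; it should point at real local files before running):

import fsspec
from fsspec.core import get_fs_token_paths

# Resolve a (possibly glob) URL to a filesystem, a deterministic token and concrete paths.
fs, token, paths = get_fs_token_paths("file:///tmp/data/*.csv", mode="rb")
for p in paths:
    with fs.open(p, "rb") as f:
        f.readline()  # e.g. peek at the header of each matched file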
venv/lib/python3.13/site-packages/fsspec/dircache.py ADDED
@@ -0,0 +1,98 @@
1
+ import time
2
+ from collections.abc import MutableMapping
3
+ from functools import lru_cache
4
+
5
+
6
+ class DirCache(MutableMapping):
7
+ """
8
+ Caching of directory listings, in a structure like::
9
+
10
+ {"path0": [
11
+ {"name": "path0/file0",
12
+ "size": 123,
13
+ "type": "file",
14
+ ...
15
+ },
16
+ {"name": "path0/file1",
17
+ },
18
+ ...
19
+ ],
20
+ "path1": [...]
21
+ }
22
+
23
+ Parameters to this class control listing expiry or indeed turn
24
+ caching off
25
+ """
26
+
27
+ def __init__(
28
+ self,
29
+ use_listings_cache=True,
30
+ listings_expiry_time=None,
31
+ max_paths=None,
32
+ **kwargs,
33
+ ):
34
+ """
35
+
36
+ Parameters
37
+ ----------
38
+ use_listings_cache: bool
39
+ If False, this cache never returns items, but always reports KeyError,
40
+ and setting items has no effect
41
+ listings_expiry_time: int or float (optional)
42
+ Time in seconds that a listing is considered valid. If None,
43
+ listings do not expire.
44
+ max_paths: int (optional)
45
+ The number of most recent listings that are considered valid; 'recent'
46
+ refers to when the entry was set.
47
+ """
48
+ self._cache = {}
49
+ self._times = {}
50
+ if max_paths:
51
+ self._q = lru_cache(max_paths + 1)(lambda key: self._cache.pop(key, None))
52
+ self.use_listings_cache = use_listings_cache
53
+ self.listings_expiry_time = listings_expiry_time
54
+ self.max_paths = max_paths
55
+
56
+ def __getitem__(self, item):
57
+ if self.listings_expiry_time is not None:
58
+ if self._times.get(item, 0) - time.time() < -self.listings_expiry_time:
59
+ del self._cache[item]
60
+ if self.max_paths:
61
+ self._q(item)
62
+ return self._cache[item] # maybe raises KeyError
63
+
64
+ def clear(self):
65
+ self._cache.clear()
66
+
67
+ def __len__(self):
68
+ return len(self._cache)
69
+
70
+ def __contains__(self, item):
71
+ try:
72
+ self[item]
73
+ return True
74
+ except KeyError:
75
+ return False
76
+
77
+ def __setitem__(self, key, value):
78
+ if not self.use_listings_cache:
79
+ return
80
+ if self.max_paths:
81
+ self._q(key)
82
+ self._cache[key] = value
83
+ if self.listings_expiry_time is not None:
84
+ self._times[key] = time.time()
85
+
86
+ def __delitem__(self, key):
87
+ del self._cache[key]
88
+
89
+ def __iter__(self):
90
+ entries = list(self._cache)
91
+
92
+ return (k for k in entries if k in self)
93
+
94
+ def __reduce__(self):
95
+ return (
96
+ DirCache,
97
+ (self.use_listings_cache, self.listings_expiry_time, self.max_paths),
98
+ )
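A short sketch of how DirCache behaves with expiry and LRU trimming (the path names are illustrative):

from fsspec.dircache import DirCache

# Listings older than 10 seconds expire; only the 2 most recently stored paths are kept.
cache = DirCache(listings_expiry_time=10, max_paths=2)
cache["bucket/a"] = [{"name": "bucket/a/file0", "size": 123, "type": "file"}]
print("bucket/a" in cache)   # True until the entry expires or is evicted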
venv/lib/python3.13/site-packages/fsspec/exceptions.py ADDED
@@ -0,0 +1,18 @@
1
+ """
2
+ fsspec user-defined exception classes
3
+ """
4
+
5
+ import asyncio
6
+
7
+
8
+ class BlocksizeMismatchError(ValueError):
9
+ """
10
+ Raised when a cached file is opened with a different blocksize than it was
11
+ written with
12
+ """
13
+
14
+
15
+ class FSTimeoutError(asyncio.TimeoutError):
16
+ """
17
+ Raised when an fsspec operation times out
18
+ """
venv/lib/python3.13/site-packages/fsspec/fuse.py ADDED
@@ -0,0 +1,324 @@
1
+ import argparse
2
+ import logging
3
+ import os
4
+ import stat
5
+ import threading
6
+ import time
7
+ from errno import EIO, ENOENT
8
+
9
+ from fuse import FUSE, FuseOSError, LoggingMixIn, Operations
10
+
11
+ from fsspec import __version__
12
+ from fsspec.core import url_to_fs
13
+
14
+ logger = logging.getLogger("fsspec.fuse")
15
+
16
+
17
+ class FUSEr(Operations):
18
+ def __init__(self, fs, path, ready_file=False):
19
+ self.fs = fs
20
+ self.cache = {}
21
+ self.root = path.rstrip("/") + "/"
22
+ self.counter = 0
23
+ logger.info("Starting FUSE at %s", path)
24
+ self._ready_file = ready_file
25
+
26
+ def getattr(self, path, fh=None):
27
+ logger.debug("getattr %s", path)
28
+ if self._ready_file and path in ["/.fuse_ready", ".fuse_ready"]:
29
+ return {"type": "file", "st_size": 5}
30
+
31
+ path = "".join([self.root, path.lstrip("/")]).rstrip("/")
32
+ try:
33
+ info = self.fs.info(path)
34
+ except FileNotFoundError as exc:
35
+ raise FuseOSError(ENOENT) from exc
36
+
37
+ data = {"st_uid": info.get("uid", 1000), "st_gid": info.get("gid", 1000)}
38
+ perm = info.get("mode", 0o777)
39
+
40
+ if info["type"] != "file":
41
+ data["st_mode"] = stat.S_IFDIR | perm
42
+ data["st_size"] = 0
43
+ data["st_blksize"] = 0
44
+ else:
45
+ data["st_mode"] = stat.S_IFREG | perm
46
+ data["st_size"] = info["size"]
47
+ data["st_blksize"] = 5 * 2**20
48
+ data["st_nlink"] = 1
49
+ data["st_atime"] = info["atime"] if "atime" in info else time.time()
50
+ data["st_ctime"] = info["ctime"] if "ctime" in info else time.time()
51
+ data["st_mtime"] = info["mtime"] if "mtime" in info else time.time()
52
+ return data
53
+
54
+ def readdir(self, path, fh):
55
+ logger.debug("readdir %s", path)
56
+ path = "".join([self.root, path.lstrip("/")])
57
+ files = self.fs.ls(path, False)
58
+ files = [os.path.basename(f.rstrip("/")) for f in files]
59
+ return [".", ".."] + files
60
+
61
+ def mkdir(self, path, mode):
62
+ path = "".join([self.root, path.lstrip("/")])
63
+ self.fs.mkdir(path)
64
+ return 0
65
+
66
+ def rmdir(self, path):
67
+ path = "".join([self.root, path.lstrip("/")])
68
+ self.fs.rmdir(path)
69
+ return 0
70
+
71
+ def read(self, path, size, offset, fh):
72
+ logger.debug("read %s", (path, size, offset))
73
+ if self._ready_file and path in ["/.fuse_ready", ".fuse_ready"]:
74
+ # status indicator
75
+ return b"ready"
76
+
77
+ f = self.cache[fh]
78
+ f.seek(offset)
79
+ out = f.read(size)
80
+ return out
81
+
82
+ def write(self, path, data, offset, fh):
83
+ logger.debug("write %s", (path, offset))
84
+ f = self.cache[fh]
85
+ f.seek(offset)
86
+ f.write(data)
87
+ return len(data)
88
+
89
+ def create(self, path, flags, fi=None):
90
+ logger.debug("create %s", (path, flags))
91
+ fn = "".join([self.root, path.lstrip("/")])
92
+ self.fs.touch(fn) # OS will want to get attributes immediately
93
+ f = self.fs.open(fn, "wb")
94
+ self.cache[self.counter] = f
95
+ self.counter += 1
96
+ return self.counter - 1
97
+
98
+ def open(self, path, flags):
99
+ logger.debug("open %s", (path, flags))
100
+ fn = "".join([self.root, path.lstrip("/")])
101
+ if flags % 2 == 0:
102
+ # read
103
+ mode = "rb"
104
+ else:
105
+ # write/create
106
+ mode = "wb"
107
+ self.cache[self.counter] = self.fs.open(fn, mode)
108
+ self.counter += 1
109
+ return self.counter - 1
110
+
111
+ def truncate(self, path, length, fh=None):
112
+ fn = "".join([self.root, path.lstrip("/")])
113
+ if length != 0:
114
+ raise NotImplementedError
115
+ # maybe should be no-op since open with write sets size to zero anyway
116
+ self.fs.touch(fn)
117
+
118
+ def unlink(self, path):
119
+ fn = "".join([self.root, path.lstrip("/")])
120
+ try:
121
+ self.fs.rm(fn, False)
122
+ except (OSError, FileNotFoundError) as exc:
123
+ raise FuseOSError(EIO) from exc
124
+
125
+ def release(self, path, fh):
126
+ try:
127
+ if fh in self.cache:
128
+ f = self.cache[fh]
129
+ f.close()
130
+ self.cache.pop(fh)
131
+ except Exception as e:
132
+ print(e)
133
+ return 0
134
+
135
+ def chmod(self, path, mode):
136
+ if hasattr(self.fs, "chmod"):
137
+ path = "".join([self.root, path.lstrip("/")])
138
+ return self.fs.chmod(path, mode)
139
+ raise NotImplementedError
140
+
141
+
142
+ def run(
143
+ fs,
144
+ path,
145
+ mount_point,
146
+ foreground=True,
147
+ threads=False,
148
+ ready_file=False,
149
+ ops_class=FUSEr,
150
+ ):
151
+ """Mount stuff in a local directory
152
+
153
+ This uses fusepy to make it appear as if a given path on an fsspec
154
+ instance is in fact resident within the local file-system.
155
+
156
+ This requires that fusepy be installed, and that FUSE be available on
157
+ the system (typically requiring a package to be installed with
158
+ apt, yum, brew, etc.).
159
+
160
+ Parameters
161
+ ----------
162
+ fs: file-system instance
163
+ From one of the compatible implementations
164
+ path: str
165
+ Location on that file-system to regard as the root directory to
166
+ mount. Note that you typically should include the terminating "/"
167
+ character.
168
+ mount_point: str
169
+ An empty directory on the local file-system where the contents of
170
+ the remote path will appear.
171
+ foreground: bool
172
+ Whether or not calling this function will block. Operation will
173
+ typically be more stable if True.
174
+ threads: bool
175
+ Whether or not to create threads when responding to file operations
176
+ within the mounted directory. Operation will typically be more
177
+ stable if False.
178
+ ready_file: bool
179
+ If True, a ``.fuse_ready`` file will be created in the
180
+ ``mount_point`` directory once the FUSE process is ready (debugging aid).
181
+ ops_class: FUSEr or subclass of FUSEr
182
+ To override the default behavior of FUSEr. For example, logging
183
+ to file.
184
+
185
+ """
186
+ func = lambda: FUSE(
187
+ ops_class(fs, path, ready_file=ready_file),
188
+ mount_point,
189
+ nothreads=not threads,
190
+ foreground=foreground,
191
+ )
192
+ if not foreground:
193
+ th = threading.Thread(target=func)
194
+ th.daemon = True
195
+ th.start()
196
+ return th
197
+ else: # pragma: no cover
198
+ try:
199
+ func()
200
+ except KeyboardInterrupt:
201
+ pass
202
+
203
+
204
+ def main(args):
205
+ """Mount filesystem from chained URL to MOUNT_POINT.
206
+
207
+ Examples:
208
+
209
+ python3 -m fsspec.fuse memory /usr/share /tmp/mem
210
+
211
+ python3 -m fsspec.fuse local /tmp/source /tmp/local \\
212
+ -l /tmp/fsspecfuse.log
213
+
214
+ You can also mount chained-URLs and use special settings:
215
+
216
+ python3 -m fsspec.fuse 'filecache::zip::file://data.zip' \\
217
+ / /tmp/zip \\
218
+ -o 'filecache-cache_storage=/tmp/simplecache'
219
+
220
+ You can specify the type of the setting by using `[int]` or `[bool]`,
221
+ (`true`, `yes`, `1` represents the Boolean value `True`):
222
+
223
+ python3 -m fsspec.fuse 'simplecache::ftp://ftp1.at.proftpd.org' \\
224
+ /historic/packages/RPMS /tmp/ftp \\
225
+ -o 'simplecache-cache_storage=/tmp/simplecache' \\
226
+ -o 'simplecache-check_files=false[bool]' \\
227
+ -o 'ftp-listings_expiry_time=60[int]' \\
228
+ -o 'ftp-username=anonymous' \\
229
+ -o 'ftp-password=xieyanbo'
230
+ """
231
+
232
+ class RawDescriptionArgumentParser(argparse.ArgumentParser):
233
+ def format_help(self):
234
+ usage = super().format_help()
235
+ parts = usage.split("\n\n")
236
+ parts[1] = self.description.rstrip()
237
+ return "\n\n".join(parts)
238
+
239
+ parser = RawDescriptionArgumentParser(prog="fsspec.fuse", description=main.__doc__)
240
+ parser.add_argument("--version", action="version", version=__version__)
241
+ parser.add_argument("url", type=str, help="fs url")
242
+ parser.add_argument("source_path", type=str, help="source directory in fs")
243
+ parser.add_argument("mount_point", type=str, help="local directory")
244
+ parser.add_argument(
245
+ "-o",
246
+ "--option",
247
+ action="append",
248
+ help="Any options of protocol included in the chained URL",
249
+ )
250
+ parser.add_argument(
251
+ "-l", "--log-file", type=str, help="Logging FUSE debug info (Default: '')"
252
+ )
253
+ parser.add_argument(
254
+ "-f",
255
+ "--foreground",
256
+ action="store_false",
257
+ help="Running in foreground or not (Default: False)",
258
+ )
259
+ parser.add_argument(
260
+ "-t",
261
+ "--threads",
262
+ action="store_false",
263
+ help="Running with threads support (Default: False)",
264
+ )
265
+ parser.add_argument(
266
+ "-r",
267
+ "--ready-file",
268
+ action="store_false",
269
+ help="The `.fuse_ready` file will exist after FUSE is ready. "
270
+ "(Debugging purpose, Default: False)",
271
+ )
272
+ args = parser.parse_args(args)
273
+
274
+ kwargs = {}
275
+ for item in args.option or []:
276
+ key, sep, value = item.partition("=")
277
+ if not sep:
278
+ parser.error(message=f"Wrong option: {item!r}")
279
+ val = value.lower()
280
+ if val.endswith("[int]"):
281
+ value = int(value[: -len("[int]")])
282
+ elif val.endswith("[bool]"):
283
+ value = val[: -len("[bool]")] in ["1", "yes", "true"]
284
+
285
+ if "-" in key:
286
+ fs_name, setting_name = key.split("-", 1)
287
+ if fs_name in kwargs:
288
+ kwargs[fs_name][setting_name] = value
289
+ else:
290
+ kwargs[fs_name] = {setting_name: value}
291
+ else:
292
+ kwargs[key] = value
293
+
294
+ if args.log_file:
295
+ logging.basicConfig(
296
+ level=logging.DEBUG,
297
+ filename=args.log_file,
298
+ format="%(asctime)s %(message)s",
299
+ )
300
+
301
+ class LoggingFUSEr(FUSEr, LoggingMixIn):
302
+ pass
303
+
304
+ fuser = LoggingFUSEr
305
+ else:
306
+ fuser = FUSEr
307
+
308
+ fs, url_path = url_to_fs(args.url, **kwargs)
309
+ logger.debug("Mounting %s to %s", url_path, str(args.mount_point))
310
+ run(
311
+ fs,
312
+ args.source_path,
313
+ args.mount_point,
314
+ foreground=args.foreground,
315
+ threads=args.threads,
316
+ ready_file=args.ready_file,
317
+ ops_class=fuser,
318
+ )
319
+
320
+
321
+ if __name__ == "__main__":
322
+ import sys
323
+
324
+ main(sys.argv[1:])
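A minimal mounting sketch using run() above (assumptions: fusepy and a working FUSE install are present, and /tmp/mnt is an existing empty directory; the paths are illustrative):

import fsspec
from fsspec.fuse import run

fs = fsspec.filesystem("memory")
fs.pipe("/demo/hello.txt", b"hello")                   # populate the in-memory tree
th = run(fs, "/demo/", "/tmp/mnt", foreground=False)   # returns the mounting thread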
venv/lib/python3.13/site-packages/fsspec/generic.py ADDED
@@ -0,0 +1,394 @@
1
+ from __future__ import annotations
2
+
3
+ import inspect
4
+ import logging
5
+ import os
6
+ import shutil
7
+ import uuid
8
+
9
+ from .asyn import AsyncFileSystem, _run_coros_in_chunks, sync_wrapper
10
+ from .callbacks import DEFAULT_CALLBACK
11
+ from .core import filesystem, get_filesystem_class, split_protocol, url_to_fs
12
+
13
+ _generic_fs = {}
14
+ logger = logging.getLogger("fsspec.generic")
15
+
16
+
17
+ def set_generic_fs(protocol, **storage_options):
18
+ """Populate the dict used for method=="generic" lookups"""
19
+ _generic_fs[protocol] = filesystem(protocol, **storage_options)
20
+
21
+
22
+ def _resolve_fs(url, method, protocol=None, storage_options=None):
23
+ """Pick instance of backend FS"""
24
+ url = url[0] if isinstance(url, (list, tuple)) else url
25
+ protocol = protocol or split_protocol(url)[0]
26
+ storage_options = storage_options or {}
27
+ if method == "default":
28
+ return filesystem(protocol)
29
+ if method == "generic":
30
+ return _generic_fs[protocol]
31
+ if method == "current":
32
+ cls = get_filesystem_class(protocol)
33
+ return cls.current()
34
+ if method == "options":
35
+ fs, _ = url_to_fs(url, **storage_options.get(protocol, {}))
36
+ return fs
37
+ raise ValueError(f"Unknown FS resolution method: {method}")
38
+
39
+
40
+ def rsync(
41
+ source,
42
+ destination,
43
+ delete_missing=False,
44
+ source_field="size",
45
+ dest_field="size",
46
+ update_cond="different",
47
+ inst_kwargs=None,
48
+ fs=None,
49
+ **kwargs,
50
+ ):
51
+ """Sync files between two directory trees
52
+
53
+ (experimental)
54
+
55
+ Parameters
56
+ ----------
57
+ source: str
58
+ Root of the directory tree to take files from. This must be a directory, but
59
+ do not include any terminating "/" character
60
+ destination: str
61
+ Root path to copy into. The contents of this location should be
62
+ identical to the contents of ``source`` when done. This will be made a
63
+ directory, and the terminal "/" should not be included.
64
+ delete_missing: bool
65
+ If there are paths in the destination that don't exist in the
66
+ source and this is True, delete them. Otherwise, leave them alone.
67
+ source_field: str | callable
68
+ If ``update_cond`` is "different", this is the key in the info
69
+ of source files to consider for difference. May be a function of the
70
+ info dict.
71
+ dest_field: str | callable
72
+ If ``update_cond`` is "different", this is the key in the info
73
+ of destination files to consider for difference. May be a function of
74
+ the info dict.
75
+ update_cond: "different"|"always"|"never"
76
+ If "always", every file is copied, regardless of whether it exists in
77
+ the destination. If "never", files that exist in the destination are
78
+ not copied again. If "different" (default), only copy if the info
79
+ fields given by ``source_field`` and ``dest_field`` (usually "size")
80
+ are different. Other comparisons may be added in the future.
81
+ inst_kwargs: dict|None
82
+ If ``fs`` is None, use this set of keyword arguments to make a
83
+ GenericFileSystem instance
84
+ fs: GenericFileSystem|None
85
+ Instance to use if explicitly given. The instance defines how to
86
+ make downstream file system instances from paths.
87
+
88
+ Returns
89
+ -------
90
+ dict of the copy operations that were performed, {source: destination}
91
+ """
92
+ fs = fs or GenericFileSystem(**(inst_kwargs or {}))
93
+ source = fs._strip_protocol(source)
94
+ destination = fs._strip_protocol(destination)
95
+ allfiles = fs.find(source, withdirs=True, detail=True)
96
+ if not fs.isdir(source):
97
+ raise ValueError("Can only rsync on a directory")
98
+ otherfiles = fs.find(destination, withdirs=True, detail=True)
99
+ dirs = [
100
+ a
101
+ for a, v in allfiles.items()
102
+ if v["type"] == "directory" and a.replace(source, destination) not in otherfiles
103
+ ]
104
+ logger.debug(f"{len(dirs)} directories to create")
105
+ if dirs:
106
+ fs.make_many_dirs(
107
+ [dirn.replace(source, destination) for dirn in dirs], exist_ok=True
108
+ )
109
+ allfiles = {a: v for a, v in allfiles.items() if v["type"] == "file"}
110
+ logger.debug(f"{len(allfiles)} files to consider for copy")
111
+ to_delete = [
112
+ o
113
+ for o, v in otherfiles.items()
114
+ if o.replace(destination, source) not in allfiles and v["type"] == "file"
115
+ ]
116
+ for k, v in allfiles.copy().items():
117
+ otherfile = k.replace(source, destination)
118
+ if otherfile in otherfiles:
119
+ if update_cond == "always":
120
+ allfiles[k] = otherfile
121
+ elif update_cond == "different":
122
+ inf1 = source_field(v) if callable(source_field) else v[source_field]
123
+ v2 = otherfiles[otherfile]
124
+ inf2 = dest_field(v2) if callable(dest_field) else v2[dest_field]
125
+ if inf1 != inf2:
126
+ # details mismatch, make copy
127
+ allfiles[k] = otherfile
128
+ else:
129
+ # details match, don't copy
130
+ allfiles.pop(k)
131
+ else:
132
+ # file not in target yet
133
+ allfiles[k] = otherfile
134
+ logger.debug(f"{len(allfiles)} files to copy")
135
+ if allfiles:
136
+ source_files, target_files = zip(*allfiles.items())
137
+ fs.cp(source_files, target_files, **kwargs)
138
+ logger.debug(f"{len(to_delete)} files to delete")
139
+ if delete_missing and to_delete:
140
+ fs.rm(to_delete)
141
+ return allfiles
142
+
143
+
144
+ class GenericFileSystem(AsyncFileSystem):
145
+ """Wrapper over all other FS types
146
+
147
+ <experimental!>
148
+
149
+ This implementation is a single unified interface to be able to run FS operations
150
+ over generic URLs, and dispatch to the specific implementations using the URL
151
+ protocol prefix.
152
+
153
+ Note: instances of this FS are always async, even if you never use it with any async
154
+ backend.
155
+ """
156
+
157
+ protocol = "generic" # there is no real reason to ever use a protocol with this FS
158
+
159
+ def __init__(self, default_method="default", storage_options=None, **kwargs):
160
+ """
161
+
162
+ Parameters
163
+ ----------
164
+ default_method: str (optional)
165
+ Defines how to configure backend FS instances. Options are:
166
+ - "default": instantiate like FSClass(), with no
167
+ extra arguments; this is the default instance of that FS, and can be
168
+ configured via the config system
169
+ - "generic": takes instances from the `_generic_fs` dict in this module,
170
+ which you must populate before use. Keys are by protocol
171
+ - "options": expects storage_options, a dict mapping protocol to
172
+ kwargs to use when constructing the filesystem
173
+ - "current": takes the most recently instantiated version of each FS
174
+ """
175
+ self.method = default_method
176
+ self.st_opts = storage_options
177
+ super().__init__(**kwargs)
178
+
179
+ def _parent(self, path):
180
+ fs = _resolve_fs(path, self.method, storage_options=self.st_opts)
181
+ return fs.unstrip_protocol(fs._parent(path))
182
+
183
+ def _strip_protocol(self, path):
184
+ # normalization only
185
+ fs = _resolve_fs(path, self.method, storage_options=self.st_opts)
186
+ return fs.unstrip_protocol(fs._strip_protocol(path))
187
+
188
+ async def _find(self, path, maxdepth=None, withdirs=False, detail=False, **kwargs):
189
+ fs = _resolve_fs(path, self.method, storage_options=self.st_opts)
190
+ if fs.async_impl:
191
+ out = await fs._find(
192
+ path, maxdepth=maxdepth, withdirs=withdirs, detail=True, **kwargs
193
+ )
194
+ else:
195
+ out = fs.find(
196
+ path, maxdepth=maxdepth, withdirs=withdirs, detail=True, **kwargs
197
+ )
198
+ result = {}
199
+ for k, v in out.items():
200
+ v = v.copy() # don't corrupt target FS dircache
201
+ name = fs.unstrip_protocol(k)
202
+ v["name"] = name
203
+ result[name] = v
204
+ if detail:
205
+ return result
206
+ return list(result)
207
+
208
+ async def _info(self, url, **kwargs):
209
+ fs = _resolve_fs(url, self.method)
210
+ if fs.async_impl:
211
+ out = await fs._info(url, **kwargs)
212
+ else:
213
+ out = fs.info(url, **kwargs)
214
+ out = out.copy() # don't edit originals
215
+ out["name"] = fs.unstrip_protocol(out["name"])
216
+ return out
217
+
218
+ async def _ls(
219
+ self,
220
+ url,
221
+ detail=True,
222
+ **kwargs,
223
+ ):
224
+ fs = _resolve_fs(url, self.method)
225
+ if fs.async_impl:
226
+ out = await fs._ls(url, detail=True, **kwargs)
227
+ else:
228
+ out = fs.ls(url, detail=True, **kwargs)
229
+ out = [o.copy() for o in out] # don't edit originals
230
+ for o in out:
231
+ o["name"] = fs.unstrip_protocol(o["name"])
232
+ if detail:
233
+ return out
234
+ else:
235
+ return [o["name"] for o in out]
236
+
237
+ async def _cat_file(
238
+ self,
239
+ url,
240
+ **kwargs,
241
+ ):
242
+ fs = _resolve_fs(url, self.method)
243
+ if fs.async_impl:
244
+ return await fs._cat_file(url, **kwargs)
245
+ else:
246
+ return fs.cat_file(url, **kwargs)
247
+
248
+ async def _pipe_file(
249
+ self,
250
+ path,
251
+ value,
252
+ **kwargs,
253
+ ):
254
+ fs = _resolve_fs(path, self.method, storage_options=self.st_opts)
255
+ if fs.async_impl:
256
+ return await fs._pipe_file(path, value, **kwargs)
257
+ else:
258
+ return fs.pipe_file(path, value, **kwargs)
259
+
260
+ async def _rm(self, url, **kwargs):
261
+ urls = url
262
+ if isinstance(urls, str):
263
+ urls = [urls]
264
+ fs = _resolve_fs(urls[0], self.method)
265
+ if fs.async_impl:
266
+ await fs._rm(urls, **kwargs)
267
+ else:
268
+ fs.rm(url, **kwargs)
269
+
270
+ async def _makedirs(self, path, exist_ok=False):
271
+ logger.debug("Make dir %s", path)
272
+ fs = _resolve_fs(path, self.method, storage_options=self.st_opts)
273
+ if fs.async_impl:
274
+ await fs._makedirs(path, exist_ok=exist_ok)
275
+ else:
276
+ fs.makedirs(path, exist_ok=exist_ok)
277
+
278
+ def rsync(self, source, destination, **kwargs):
279
+ """Sync files between two directory trees
280
+
281
+ See :func:`rsync` for more details.
282
+ """
283
+ rsync(source, destination, fs=self, **kwargs)
284
+
285
+ async def _cp_file(
286
+ self,
287
+ url,
288
+ url2,
289
+ blocksize=2**20,
290
+ callback=DEFAULT_CALLBACK,
291
+ tempdir: str | None = None,
292
+ **kwargs,
293
+ ):
294
+ fs = _resolve_fs(url, self.method)
295
+ fs2 = _resolve_fs(url2, self.method)
296
+ if fs is fs2:
297
+ # pure remote
298
+ if fs.async_impl:
299
+ return await fs._copy(url, url2, **kwargs)
300
+ else:
301
+ return fs.copy(url, url2, **kwargs)
302
+ await copy_file_op(fs, [url], fs2, [url2], tempdir, 1, on_error="raise")
303
+
304
+ async def _make_many_dirs(self, urls, exist_ok=True):
305
+ fs = _resolve_fs(urls[0], self.method)
306
+ if fs.async_impl:
307
+ coros = [fs._makedirs(u, exist_ok=exist_ok) for u in urls]
308
+ await _run_coros_in_chunks(coros)
309
+ else:
310
+ for u in urls:
311
+ fs.makedirs(u, exist_ok=exist_ok)
312
+
313
+ make_many_dirs = sync_wrapper(_make_many_dirs)
314
+
315
+ async def _copy(
316
+ self,
317
+ path1: list[str],
318
+ path2: list[str],
319
+ recursive: bool = False,
320
+ on_error: str = "ignore",
321
+ maxdepth: int | None = None,
322
+ batch_size: int | None = None,
323
+ tempdir: str | None = None,
324
+ **kwargs,
325
+ ):
326
+ # TODO: special case for one FS being local, which can use get/put
327
+ # TODO: special case for one being memFS, which can use cat/pipe
328
+ if recursive:
329
+ raise NotImplementedError("Please use fsspec.generic.rsync")
330
+ path1 = [path1] if isinstance(path1, str) else path1
331
+ path2 = [path2] if isinstance(path2, str) else path2
332
+
333
+ fs = _resolve_fs(path1, self.method)
334
+ fs2 = _resolve_fs(path2, self.method)
335
+
336
+ if fs is fs2:
337
+ if fs.async_impl:
338
+ return await fs._copy(path1, path2, **kwargs)
339
+ else:
340
+ return fs.copy(path1, path2, **kwargs)
341
+
342
+ await copy_file_op(
343
+ fs, path1, fs2, path2, tempdir, batch_size, on_error=on_error
344
+ )
345
+
346
+
347
+ async def copy_file_op(
348
+ fs1, url1, fs2, url2, tempdir=None, batch_size=20, on_error="ignore"
349
+ ):
350
+ import tempfile
351
+
352
+ tempdir = tempdir or tempfile.mkdtemp()
353
+ try:
354
+ coros = [
355
+ _copy_file_op(
356
+ fs1,
357
+ u1,
358
+ fs2,
359
+ u2,
360
+ os.path.join(tempdir, uuid.uuid4().hex),
361
+ )
362
+ for u1, u2 in zip(url1, url2)
363
+ ]
364
+ out = await _run_coros_in_chunks(
365
+ coros, batch_size=batch_size, return_exceptions=True
366
+ )
367
+ finally:
368
+ shutil.rmtree(tempdir)
369
+ if on_error == "return":
370
+ return out
371
+ elif on_error == "raise":
372
+ for o in out:
373
+ if isinstance(o, Exception):
374
+ raise o
375
+
376
+
377
+ async def _copy_file_op(fs1, url1, fs2, url2, local, on_error="ignore"):
378
+ if fs1.async_impl:
379
+ await fs1._get_file(url1, local)
380
+ else:
381
+ fs1.get_file(url1, local)
382
+ if fs2.async_impl:
383
+ await fs2._put_file(local, url2)
384
+ else:
385
+ fs2.put_file(local, url2)
386
+ os.unlink(local)
387
+ logger.debug("Copy %s -> %s; done", url1, url2)
388
+
389
+
390
+ async def maybe_await(cor):
391
+ if inspect.iscoroutine(cor):
392
+ return await cor
393
+ else:
394
+ return cor
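A small sketch of syncing two trees through GenericFileSystem (assumptions: the source tree already exists; memory:// is used only to keep the example self-contained):

import fsspec
from fsspec.generic import GenericFileSystem, rsync

fs = fsspec.filesystem("memory")
fs.pipe("/src/a.txt", b"data")                            # create something to copy
copied = rsync("memory://src", "memory://dest", fs=GenericFileSystem())
print(copied)                                             # {source_path: destination_path, ...}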
venv/lib/python3.13/site-packages/fsspec/gui.py ADDED
@@ -0,0 +1,417 @@
1
+ import ast
2
+ import contextlib
3
+ import logging
4
+ import os
5
+ import re
6
+ from collections.abc import Sequence
7
+ from typing import ClassVar
8
+
9
+ import panel as pn
10
+
11
+ from .core import OpenFile, get_filesystem_class, split_protocol
12
+ from .registry import known_implementations
13
+
14
+ pn.extension()
15
+ logger = logging.getLogger("fsspec.gui")
16
+
17
+
18
+ class SigSlot:
19
+ """Signal-slot mixin, for Panel event passing
20
+
21
+ Include this class in a widget manager's superclasses to be able to
22
+ register events and callbacks on Panel widgets managed by that class.
23
+
24
+ The method ``_register`` should be called as widgets are added, and external
25
+ code should call ``connect`` to associate callbacks.
26
+
27
+ By default, all signals emit a DEBUG logging statement.
28
+ """
29
+
30
+ # names of signals that this class may emit each of which must be
31
+ # set by _register for any new instance
32
+ signals: ClassVar[Sequence[str]] = []
33
+ # names of actions that this class may respond to
34
+ slots: ClassVar[Sequence[str]] = []
35
+
36
+ # each of which must be a method name
37
+
38
+ def __init__(self):
39
+ self._ignoring_events = False
40
+ self._sigs = {}
41
+ self._map = {}
42
+ self._setup()
43
+
44
+ def _setup(self):
45
+ """Create GUI elements and register signals"""
46
+ self.panel = pn.pane.PaneBase()
47
+ # no signals to set up in the base class
48
+
49
+ def _register(
50
+ self, widget, name, thing="value", log_level=logging.DEBUG, auto=False
51
+ ):
52
+ """Watch the given attribute of a widget and assign it a named event
53
+
54
+ This is normally called at the time a widget is instantiated, in the
55
+ class which owns it.
56
+
57
+ Parameters
58
+ ----------
59
+ widget : pn.layout.Panel or None
60
+ Widget to watch. If None, an anonymous signal not associated with
61
+ any widget.
62
+ name : str
63
+ Name of this event
64
+ thing : str
65
+ Attribute of the given widget to watch
66
+ log_level : int
67
+ When the signal is triggered, a logging event of the given level
68
+ will be fired in the fsspec.gui logger.
69
+ auto : bool
70
+ If True, automatically connects with a method in this class of the
71
+ same name.
72
+ """
73
+ if name not in self.signals:
74
+ raise ValueError(f"Attempt to assign an undeclared signal: {name}")
75
+ self._sigs[name] = {
76
+ "widget": widget,
77
+ "callbacks": [],
78
+ "thing": thing,
79
+ "log": log_level,
80
+ }
81
+ wn = "-".join(
82
+ [
83
+ getattr(widget, "name", str(widget)) if widget is not None else "none",
84
+ thing,
85
+ ]
86
+ )
87
+ self._map[wn] = name
88
+ if widget is not None:
89
+ widget.param.watch(self._signal, thing, onlychanged=True)
90
+ if auto and hasattr(self, name):
91
+ self.connect(name, getattr(self, name))
92
+
93
+ def _repr_mimebundle_(self, *args, **kwargs):
94
+ """Display in a notebook or a server"""
95
+ try:
96
+ return self.panel._repr_mimebundle_(*args, **kwargs)
97
+ except (ValueError, AttributeError) as exc:
98
+ raise NotImplementedError(
99
+ "Panel does not seem to be set up properly"
100
+ ) from exc
101
+
102
+ def connect(self, signal, slot):
103
+ """Associate a callback with the given event
104
+
105
+ The callback must be a function which takes the "new" value of the
106
+ watched attribute as the only parameter. If the callback returns False,
107
+ this cancels any further processing of the given event.
108
+
109
+ Alternatively, the callback can be a string, in which case it means
110
+ emitting the correspondingly-named event (i.e., connect to self)
111
+ """
112
+ self._sigs[signal]["callbacks"].append(slot)
113
+
114
+ def _signal(self, event):
115
+ """This is called by an action on a widget
116
+
117
+ Within a self.ignore_events context, nothing happens.
118
+
119
+ Tests can execute this method by directly changing the values of
120
+ widget components.
121
+ """
122
+ if not self._ignoring_events:
123
+ wn = "-".join([event.obj.name, event.name])
124
+ if wn in self._map and self._map[wn] in self._sigs:
125
+ self._emit(self._map[wn], event.new)
126
+
127
+ @contextlib.contextmanager
128
+ def ignore_events(self):
129
+ """Temporarily turn off events processing in this instance
130
+
131
+ (does not propagate to children)
132
+ """
133
+ self._ignoring_events = True
134
+ try:
135
+ yield
136
+ finally:
137
+ self._ignoring_events = False
138
+
139
+ def _emit(self, sig, value=None):
140
+ """An event happened, call its callbacks
141
+
142
+ This method can be used in tests to simulate message passing without
143
+ directly changing visual elements.
144
+
145
+ Calling of callbacks will halt whenever one returns False.
146
+ """
147
+ logger.log(self._sigs[sig]["log"], f"{sig}: {value}")
148
+ for callback in self._sigs[sig]["callbacks"]:
149
+ if isinstance(callback, str):
150
+ self._emit(callback)
151
+ else:
152
+ try:
153
+ # running callbacks should not break the interface
154
+ ret = callback(value)
155
+ if ret is False:
156
+ break
157
+ except Exception as e:
158
+ logger.exception(
159
+ "Exception (%s) while executing callback for signal: %s",
160
+ e,
161
+ sig,
162
+ )
163
+
164
+ def show(self, threads=False):
165
+ """Open a new browser tab and display this instance's interface"""
166
+ self.panel.show(threads=threads, verbose=False)
167
+ return self
168
+
169
+
170
+ class SingleSelect(SigSlot):
171
+ """A multiselect which only allows you to select one item for an event"""
172
+
173
+ signals = ["_selected", "selected"] # the first is internal
174
+ slots = ["set_options", "set_selection", "add", "clear", "select"]
175
+
176
+ def __init__(self, **kwargs):
177
+ self.kwargs = kwargs
178
+ super().__init__()
179
+
180
+ def _setup(self):
181
+ self.panel = pn.widgets.MultiSelect(**self.kwargs)
182
+ self._register(self.panel, "_selected", "value")
183
+ self._register(None, "selected")
184
+ self.connect("_selected", self.select_one)
185
+
186
+ def _signal(self, *args, **kwargs):
187
+ super()._signal(*args, **kwargs)
188
+
189
+ def select_one(self, *_):
190
+ with self.ignore_events():
191
+ val = [self.panel.value[-1]] if self.panel.value else []
192
+ self.panel.value = val
193
+ self._emit("selected", self.panel.value)
194
+
195
+ def set_options(self, options):
196
+ self.panel.options = options
197
+
198
+ def clear(self):
199
+ self.panel.options = []
200
+
201
+ @property
202
+ def value(self):
203
+ return self.panel.value
204
+
205
+ def set_selection(self, selection):
206
+ self.panel.value = [selection]
207
+
208
+
209
+ class FileSelector(SigSlot):
210
+ """Panel-based graphical file selector widget
211
+
212
+ Instances of this widget are interactive and can be displayed in jupyter by having
213
+ them as the output of a cell, or in a separate browser tab using ``.show()``.
214
+ """
215
+
216
+ signals = [
217
+ "protocol_changed",
218
+ "selection_changed",
219
+ "directory_entered",
220
+ "home_clicked",
221
+ "up_clicked",
222
+ "go_clicked",
223
+ "filters_changed",
224
+ ]
225
+ slots = ["set_filters", "go_home"]
226
+
227
+ def __init__(self, url=None, filters=None, ignore=None, kwargs=None):
228
+ """
229
+
230
+ Parameters
231
+ ----------
232
+ url : str (optional)
233
+ Initial value of the URL to populate the dialog; should include protocol
234
+ filters : list(str) (optional)
235
+ File endings to include in the listings. If not included, all files are
236
+ allowed. Does not affect directories.
237
+ If given, the endings will appear as checkboxes in the interface
238
+ ignore : list(str) (optional)
239
+ Regex(s) of file basename patterns to ignore, e.g., "\\." for typical
240
+ hidden files on posix
241
+ kwargs : dict (optional)
242
+ To pass to file system instance
243
+ """
244
+ if url:
245
+ self.init_protocol, url = split_protocol(url)
246
+ else:
247
+ self.init_protocol, url = "file", os.getcwd()
248
+ self.init_url = url
249
+ self.init_kwargs = (kwargs if isinstance(kwargs, str) else str(kwargs)) or "{}"
250
+ self.filters = filters
251
+ self.ignore = [re.compile(i) for i in ignore or []]
252
+ self._fs = None
253
+ super().__init__()
254
+
255
+ def _setup(self):
256
+ self.url = pn.widgets.TextInput(
257
+ name="url",
258
+ value=self.init_url,
259
+ align="end",
260
+ sizing_mode="stretch_width",
261
+ width_policy="max",
262
+ )
263
+ self.protocol = pn.widgets.Select(
264
+ options=sorted(known_implementations),
265
+ value=self.init_protocol,
266
+ name="protocol",
267
+ align="center",
268
+ )
269
+ self.kwargs = pn.widgets.TextInput(
270
+ name="kwargs", value=self.init_kwargs, align="center"
271
+ )
272
+ self.go = pn.widgets.Button(name="⇨", align="end", width=45)
273
+ self.main = SingleSelect(size=10)
274
+ self.home = pn.widgets.Button(name="🏠", width=40, height=30, align="end")
275
+ self.up = pn.widgets.Button(name="‹", width=30, height=30, align="end")
276
+
277
+ self._register(self.protocol, "protocol_changed", auto=True)
278
+ self._register(self.go, "go_clicked", "clicks", auto=True)
279
+ self._register(self.up, "up_clicked", "clicks", auto=True)
280
+ self._register(self.home, "home_clicked", "clicks", auto=True)
281
+ self._register(None, "selection_changed")
282
+ self.main.connect("selected", self.selection_changed)
283
+ self._register(None, "directory_entered")
284
+ self.prev_protocol = self.protocol.value
285
+ self.prev_kwargs = self.storage_options
286
+
287
+ self.filter_sel = pn.widgets.CheckBoxGroup(
288
+ value=[], options=[], inline=False, align="end", width_policy="min"
289
+ )
290
+ self._register(self.filter_sel, "filters_changed", auto=True)
291
+
292
+ self.panel = pn.Column(
293
+ pn.Row(self.protocol, self.kwargs),
294
+ pn.Row(self.home, self.up, self.url, self.go, self.filter_sel),
295
+ self.main.panel,
296
+ )
297
+ self.set_filters(self.filters)
298
+ self.go_clicked()
299
+
300
+ def set_filters(self, filters=None):
301
+ self.filters = filters
302
+ if filters:
303
+ self.filter_sel.options = filters
304
+ self.filter_sel.value = filters
305
+ else:
306
+ self.filter_sel.options = []
307
+ self.filter_sel.value = []
308
+
309
+ @property
310
+ def storage_options(self):
311
+ """Value of the kwargs box as a dictionary"""
312
+ return ast.literal_eval(self.kwargs.value) or {}
313
+
314
+ @property
315
+ def fs(self):
316
+ """Current filesystem instance"""
317
+ if self._fs is None:
318
+ cls = get_filesystem_class(self.protocol.value)
319
+ self._fs = cls(**self.storage_options)
320
+ return self._fs
321
+
322
+ @property
323
+ def urlpath(self):
324
+ """URL of currently selected item"""
325
+ return (
326
+ (f"{self.protocol.value}://{self.main.value[0]}")
327
+ if self.main.value
328
+ else None
329
+ )
330
+
331
+ def open_file(self, mode="rb", compression=None, encoding=None):
332
+ """Create OpenFile instance for the currently selected item
333
+
334
+ For example, in a notebook you might do something like
335
+
336
+ .. code-block::
337
+
338
+ [ ]: sel = FileSelector(); sel
339
+
340
+ # user selects their file
341
+
342
+ [ ]: with sel.open_file('rb') as f:
343
+ ... out = f.read()
344
+
345
+ Parameters
346
+ ----------
347
+ mode: str (optional)
348
+ Open mode for the file.
349
+ compression: str (optional)
350
+ If given, interact with the file as compressed. Set to 'infer' to guess
351
+ compression from the file ending
352
+ encoding: str (optional)
353
+ If using text mode, use this encoding; defaults to UTF8.
354
+ """
355
+ if self.urlpath is None:
356
+ raise ValueError("No file selected")
357
+ return OpenFile(self.fs, self.urlpath, mode, compression, encoding)
358
+
359
+ def filters_changed(self, values):
360
+ self.filters = values
361
+ self.go_clicked()
362
+
363
+ def selection_changed(self, *_):
364
+ if self.urlpath is None:
365
+ return
366
+ if self.fs.isdir(self.urlpath):
367
+ self.url.value = self.fs._strip_protocol(self.urlpath)
368
+ self.go_clicked()
369
+
370
+ def go_clicked(self, *_):
371
+ if (
372
+ self.prev_protocol != self.protocol.value
373
+ or self.prev_kwargs != self.storage_options
374
+ ):
375
+ self._fs = None # causes fs to be recreated
376
+ self.prev_protocol = self.protocol.value
377
+ self.prev_kwargs = self.storage_options
378
+ listing = sorted(
379
+ self.fs.ls(self.url.value, detail=True), key=lambda x: x["name"]
380
+ )
381
+ listing = [
382
+ l
383
+ for l in listing
384
+ if not any(i.match(l["name"].rsplit("/", 1)[-1]) for i in self.ignore)
385
+ ]
386
+ folders = {
387
+ "📁 " + o["name"].rsplit("/", 1)[-1]: o["name"]
388
+ for o in listing
389
+ if o["type"] == "directory"
390
+ }
391
+ files = {
392
+ "📄 " + o["name"].rsplit("/", 1)[-1]: o["name"]
393
+ for o in listing
394
+ if o["type"] == "file"
395
+ }
396
+ if self.filters:
397
+ files = {
398
+ k: v
399
+ for k, v in files.items()
400
+ if any(v.endswith(ext) for ext in self.filters)
401
+ }
402
+ self.main.set_options(dict(**folders, **files))
403
+
404
+ def protocol_changed(self, *_):
405
+ self._fs = None
406
+ self.main.options = []
407
+ self.url.value = ""
408
+
409
+ def home_clicked(self, *_):
410
+ self.protocol.value = self.init_protocol
411
+ self.kwargs.value = self.init_kwargs
412
+ self.url.value = self.init_url
413
+ self.go_clicked()
414
+
415
+ def up_clicked(self, *_):
416
+ self.url.value = self.fs._parent(self.url.value)
417
+ self.go_clicked()
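A tiny sketch of driving the selector above (assumptions: panel is installed and the starting URL is illustrative):

from fsspec.gui import FileSelector

sel = FileSelector(url="file:///tmp")   # render `sel` in a notebook cell, or call sel.show()
# ... after picking a file in the widget:
# with sel.open_file("rb") as f:
#     data = f.read()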
venv/lib/python3.13/site-packages/fsspec/json.py ADDED
@@ -0,0 +1,117 @@
1
+ import json
2
+ from collections.abc import Mapping, Sequence
3
+ from contextlib import suppress
4
+ from pathlib import PurePath
5
+ from typing import (
6
+ Any,
7
+ Callable,
8
+ ClassVar,
9
+ Optional,
10
+ )
11
+
12
+ from .registry import _import_class, get_filesystem_class
13
+ from .spec import AbstractFileSystem
14
+
15
+
16
+ class FilesystemJSONEncoder(json.JSONEncoder):
17
+ include_password: ClassVar[bool] = True
18
+
19
+ def default(self, o: Any) -> Any:
20
+ if isinstance(o, AbstractFileSystem):
21
+ return o.to_dict(include_password=self.include_password)
22
+ if isinstance(o, PurePath):
23
+ cls = type(o)
24
+ return {"cls": f"{cls.__module__}.{cls.__name__}", "str": str(o)}
25
+
26
+ return super().default(o)
27
+
28
+ def make_serializable(self, obj: Any) -> Any:
29
+ """
30
+ Recursively converts an object so that it can be JSON serialized via
31
+ :func:`json.dumps` and :func:`json.dump`, without actually calling
32
+ said functions.
33
+ """
34
+ if isinstance(obj, (str, int, float, bool)):
35
+ return obj
36
+ if isinstance(obj, Mapping):
37
+ return {k: self.make_serializable(v) for k, v in obj.items()}
38
+ if isinstance(obj, Sequence):
39
+ return [self.make_serializable(v) for v in obj]
40
+
41
+ return self.default(obj)
42
+
43
+
44
+ class FilesystemJSONDecoder(json.JSONDecoder):
45
+ def __init__(
46
+ self,
47
+ *,
48
+ object_hook: Optional[Callable[[dict[str, Any]], Any]] = None,
49
+ parse_float: Optional[Callable[[str], Any]] = None,
50
+ parse_int: Optional[Callable[[str], Any]] = None,
51
+ parse_constant: Optional[Callable[[str], Any]] = None,
52
+ strict: bool = True,
53
+ object_pairs_hook: Optional[Callable[[list[tuple[str, Any]]], Any]] = None,
54
+ ) -> None:
55
+ self.original_object_hook = object_hook
56
+
57
+ super().__init__(
58
+ object_hook=self.custom_object_hook,
59
+ parse_float=parse_float,
60
+ parse_int=parse_int,
61
+ parse_constant=parse_constant,
62
+ strict=strict,
63
+ object_pairs_hook=object_pairs_hook,
64
+ )
65
+
66
+ @classmethod
67
+ def try_resolve_path_cls(cls, dct: dict[str, Any]):
68
+ with suppress(Exception):
69
+ fqp = dct["cls"]
70
+
71
+ path_cls = _import_class(fqp)
72
+
73
+ if issubclass(path_cls, PurePath):
74
+ return path_cls
75
+
76
+ return None
77
+
78
+ @classmethod
79
+ def try_resolve_fs_cls(cls, dct: dict[str, Any]):
80
+ with suppress(Exception):
81
+ if "cls" in dct:
82
+ try:
83
+ fs_cls = _import_class(dct["cls"])
84
+ if issubclass(fs_cls, AbstractFileSystem):
85
+ return fs_cls
86
+ except Exception:
87
+ if "protocol" in dct: # Fallback if cls cannot be imported
88
+ return get_filesystem_class(dct["protocol"])
89
+
90
+ raise
91
+
92
+ return None
93
+
94
+ def custom_object_hook(self, dct: dict[str, Any]):
95
+ if "cls" in dct:
96
+ if (obj_cls := self.try_resolve_fs_cls(dct)) is not None:
97
+ return AbstractFileSystem.from_dict(dct)
98
+ if (obj_cls := self.try_resolve_path_cls(dct)) is not None:
99
+ return obj_cls(dct["str"])
100
+
101
+ if self.original_object_hook is not None:
102
+ return self.original_object_hook(dct)
103
+
104
+ return dct
105
+
106
+ def unmake_serializable(self, obj: Any) -> Any:
107
+ """
108
+ Inverse function of :meth:`FilesystemJSONEncoder.make_serializable`.
109
+ """
110
+ if isinstance(obj, dict):
111
+ obj = self.custom_object_hook(obj)
112
+ if isinstance(obj, dict):
113
+ return {k: self.unmake_serializable(v) for k, v in obj.items()}
114
+ if isinstance(obj, (list, tuple)):
115
+ return [self.unmake_serializable(v) for v in obj]
116
+
117
+ return obj
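A round-trip sketch for the encoder/decoder pair above (assumption: the memory filesystem is used purely for illustration):

import json
import fsspec
from fsspec.json import FilesystemJSONDecoder, FilesystemJSONEncoder

fs = fsspec.filesystem("memory")
payload = json.dumps({"fs": fs}, cls=FilesystemJSONEncoder)      # fs serialized via to_dict()
restored = json.loads(payload, cls=FilesystemJSONDecoder)        # reconstructed via from_dict()
print(type(restored["fs"]).__name__)                             # MemoryFileSystem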
venv/lib/python3.13/site-packages/fsspec/mapping.py ADDED
@@ -0,0 +1,251 @@
1
+ import array
2
+ import logging
3
+ import posixpath
4
+ import warnings
5
+ from collections.abc import MutableMapping
6
+ from functools import cached_property
7
+
8
+ from fsspec.core import url_to_fs
9
+
10
+ logger = logging.getLogger("fsspec.mapping")
11
+
12
+
13
+ class FSMap(MutableMapping):
14
+ """Wrap a FileSystem instance as a mutable mapping.
15
+
16
+ The keys of the mapping become files under the given root, and the
17
+ values (which must be bytes) the contents of those files.
18
+
19
+ Parameters
20
+ ----------
21
+ root: string
22
+ prefix for all the files
23
+ fs: FileSystem instance
24
+ check: bool (=False)
25
+ performs a touch at the location, to check for write access.
26
+
27
+ Examples
28
+ --------
29
+ >>> fs = FileSystem(**parameters) # doctest: +SKIP
30
+ >>> d = FSMap('my-data/path/', fs) # doctest: +SKIP
31
+ or, more likely
32
+ >>> d = fs.get_mapper('my-data/path/')
33
+
34
+ >>> d['loc1'] = b'Hello World' # doctest: +SKIP
35
+ >>> list(d.keys()) # doctest: +SKIP
36
+ ['loc1']
37
+ >>> d['loc1'] # doctest: +SKIP
38
+ b'Hello World'
39
+ """
40
+
41
+ def __init__(self, root, fs, check=False, create=False, missing_exceptions=None):
42
+ self.fs = fs
43
+ self.root = fs._strip_protocol(root)
44
+ self._root_key_to_str = fs._strip_protocol(posixpath.join(root, "x"))[:-1]
45
+ if missing_exceptions is None:
46
+ missing_exceptions = (
47
+ FileNotFoundError,
48
+ IsADirectoryError,
49
+ NotADirectoryError,
50
+ )
51
+ self.missing_exceptions = missing_exceptions
52
+ self.check = check
53
+ self.create = create
54
+ if create:
55
+ if not self.fs.exists(root):
56
+ self.fs.mkdir(root)
57
+ if check:
58
+ if not self.fs.exists(root):
59
+ raise ValueError(
60
+ f"Path {root} does not exist. Create "
61
+ f" with the ``create=True`` keyword"
62
+ )
63
+ self.fs.touch(root + "/a")
64
+ self.fs.rm(root + "/a")
65
+
66
+ @cached_property
67
+ def dirfs(self):
68
+ """dirfs instance that can be used with the same keys as the mapper"""
69
+ from .implementations.dirfs import DirFileSystem
70
+
71
+ return DirFileSystem(path=self._root_key_to_str, fs=self.fs)
72
+
73
+ def clear(self):
74
+ """Remove all keys below root - empties out mapping"""
75
+ logger.info("Clear mapping at %s", self.root)
76
+ try:
77
+ self.fs.rm(self.root, True)
78
+ self.fs.mkdir(self.root)
79
+ except: # noqa: E722
80
+ pass
81
+
82
+ def getitems(self, keys, on_error="raise"):
83
+ """Fetch multiple items from the store
84
+
85
+ If the backend is async-able, this might proceed concurrently
86
+
87
+ Parameters
88
+ ----------
89
+ keys: list(str)
90
+ The keys to be fetched
91
+ on_error : "raise", "omit", "return"
92
+ If raise, an underlying exception will be raised (converted to KeyError
93
+ if the type is in self.missing_exceptions); if omit, keys with exception
94
+ will simply not be included in the output; if "return", all keys are
95
+ included in the output, but the value will be bytes or an exception
96
+ instance.
97
+
98
+ Returns
99
+ -------
100
+ dict(key, bytes|exception)
101
+ """
102
+ keys2 = [self._key_to_str(k) for k in keys]
103
+ oe = on_error if on_error == "raise" else "return"
104
+ try:
105
+ out = self.fs.cat(keys2, on_error=oe)
106
+ if isinstance(out, bytes):
107
+ out = {keys2[0]: out}
108
+ except self.missing_exceptions as e:
109
+ raise KeyError from e
110
+ out = {
111
+ k: (KeyError() if isinstance(v, self.missing_exceptions) else v)
112
+ for k, v in out.items()
113
+ }
114
+ return {
115
+ key: out[k2] if on_error == "raise" else out.get(k2, KeyError(k2))
116
+ for key, k2 in zip(keys, keys2)
117
+ if on_error == "return" or not isinstance(out[k2], BaseException)
118
+ }
119
+
120
+ def setitems(self, values_dict):
121
+ """Set the values of multiple items in the store
122
+
123
+ Parameters
124
+ ----------
125
+ values_dict: dict(str, bytes)
126
+ """
127
+ values = {self._key_to_str(k): maybe_convert(v) for k, v in values_dict.items()}
128
+ self.fs.pipe(values)
129
+
130
+ def delitems(self, keys):
131
+ """Remove multiple keys from the store"""
132
+ self.fs.rm([self._key_to_str(k) for k in keys])
133
+
134
+ def _key_to_str(self, key):
135
+ """Generate full path for the key"""
136
+ if not isinstance(key, str):
137
+ # raise TypeError("key must be of type `str`, got `{type(key).__name__}`"
138
+ warnings.warn(
139
+ "from fsspec 2023.5 onward FSMap non-str keys will raise TypeError",
140
+ DeprecationWarning,
141
+ )
142
+ if isinstance(key, list):
143
+ key = tuple(key)
144
+ key = str(key)
145
+ return f"{self._root_key_to_str}{key}".rstrip("/")
146
+
147
+ def _str_to_key(self, s):
148
+ """Strip path prefix to leave key name"""
149
+ return s[len(self.root) :].lstrip("/")
150
+
151
+ def __getitem__(self, key, default=None):
152
+ """Retrieve data"""
153
+ k = self._key_to_str(key)
154
+ try:
155
+ result = self.fs.cat(k)
156
+ except self.missing_exceptions as exc:
157
+ if default is not None:
158
+ return default
159
+ raise KeyError(key) from exc
160
+ return result
161
+
162
+ def pop(self, key, default=None):
163
+ """Pop data"""
164
+ result = self.__getitem__(key, default)
165
+ try:
166
+ del self[key]
167
+ except KeyError:
168
+ pass
169
+ return result
170
+
171
+ def __setitem__(self, key, value):
172
+ """Store value in key"""
173
+ key = self._key_to_str(key)
174
+ self.fs.mkdirs(self.fs._parent(key), exist_ok=True)
175
+ self.fs.pipe_file(key, maybe_convert(value))
176
+
177
+ def __iter__(self):
178
+ return (self._str_to_key(x) for x in self.fs.find(self.root))
179
+
180
+ def __len__(self):
181
+ return len(self.fs.find(self.root))
182
+
183
+ def __delitem__(self, key):
184
+ """Remove key"""
185
+ try:
186
+ self.fs.rm(self._key_to_str(key))
187
+ except Exception as exc:
188
+ raise KeyError from exc
189
+
190
+ def __contains__(self, key):
191
+ """Does key exist in mapping?"""
192
+ path = self._key_to_str(key)
193
+ return self.fs.isfile(path)
194
+
195
+ def __reduce__(self):
196
+ return FSMap, (self.root, self.fs, False, False, self.missing_exceptions)
197
+
198
+
199
+ def maybe_convert(value):
200
+ if isinstance(value, array.array) or hasattr(value, "__array__"):
201
+ # bytes-like things
202
+ if hasattr(value, "dtype") and value.dtype.kind in "Mm":
203
+ # The buffer interface doesn't support datetime64/timedelta64 numpy
204
+ # arrays
205
+ value = value.view("int64")
206
+ value = bytes(memoryview(value))
207
+ return value
208
+
209
+
210
+ def get_mapper(
211
+ url="",
212
+ check=False,
213
+ create=False,
214
+ missing_exceptions=None,
215
+ alternate_root=None,
216
+ **kwargs,
217
+ ):
218
+ """Create key-value interface for given URL and options
219
+
220
+ The URL will be of the form "protocol://location" and point to the root
221
+ of the mapper required. All keys will be file-names below this location,
222
+ and their values the contents of each key.
223
+
224
+ Also accepts compound URLs like zip::s3://bucket/file.zip , see ``fsspec.open``.
225
+
226
+ Parameters
227
+ ----------
228
+ url: str
229
+ Root URL of mapping
230
+ check: bool
231
+ Whether to attempt to read from the location before instantiation, to
232
+ check that the mapping does exist
233
+ create: bool
234
+ Whether to make the directory corresponding to the root before
235
+ instantiating
236
+ missing_exceptions: None or tuple
237
+ If given, these exception types will be regarded as missing keys and
238
+ return KeyError when trying to read data. By default, you get
239
+ (FileNotFoundError, IsADirectoryError, NotADirectoryError)
240
+ alternate_root: None or str
241
+ In cases of complex URLs, the parser may fail to pick the correct part
242
+ for the mapper root, so this arg can override
243
+
244
+ Returns
245
+ -------
246
+ ``FSMap`` instance, the dict-like key-value store.
247
+ """
248
+ # Removing protocol here - could defer to each open() on the backend
249
+ fs, urlpath = url_to_fs(url, **kwargs)
250
+ root = alternate_root if alternate_root is not None else urlpath
251
+ return FSMap(root, fs, check, create, missing_exceptions=missing_exceptions)
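A brief usage sketch of the mapper interface defined above, using the in-memory filesystem so it is self-contained (the `memory://mapper-demo` root is arbitrary):

import fsspec

m = fsspec.get_mapper("memory://mapper-demo")
m["a/b"] = b"hello"      # __setitem__ pipes bytes to memory://mapper-demo/a/b
print(list(m))           # ['a/b']  -- keys are paths relative to the root
print(m["a/b"])          # b'hello'
print("a/b" in m)        # True
del m["a/b"]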
venv/lib/python3.13/site-packages/fsspec/parquet.py ADDED
@@ -0,0 +1,541 @@
1
+ import io
2
+ import json
3
+ import warnings
4
+
5
+ from .core import url_to_fs
6
+ from .utils import merge_offset_ranges
7
+
8
+ # Parquet-Specific Utilities for fsspec
9
+ #
10
+ # Most of the functions defined in this module are NOT
11
+ # intended for public consumption. The only exception
12
+ # to this is `open_parquet_file`, which should be used
13
+ # in place of `fs.open()` to open parquet-formatted files
14
+ # on remote file systems.
15
+
16
+
17
+ def open_parquet_file(
18
+ path,
19
+ mode="rb",
20
+ fs=None,
21
+ metadata=None,
22
+ columns=None,
23
+ row_groups=None,
24
+ storage_options=None,
25
+ strict=False,
26
+ engine="auto",
27
+ max_gap=64_000,
28
+ max_block=256_000_000,
29
+ footer_sample_size=1_000_000,
30
+ **kwargs,
31
+ ):
32
+ """
33
+ Return a file-like object for a single Parquet file.
34
+
35
+ The specified parquet `engine` will be used to parse the
36
+ footer metadata, and determine the required byte ranges
37
+ from the file. The target path will then be opened with
38
+ the "parts" (`KnownPartsOfAFile`) caching strategy.
39
+
40
+ Note that this method is intended for usage with remote
41
+ file systems, and is unlikely to improve parquet-read
42
+ performance on local file systems.
43
+
44
+ Parameters
45
+ ----------
46
+ path: str
47
+ Target file path.
48
+ mode: str, optional
49
+ Mode option to be passed through to `fs.open`. Default is "rb".
50
+ metadata: Any, optional
51
+ Parquet metadata object. Object type must be supported
52
+ by the backend parquet engine. For now, only the "fastparquet"
53
+ engine supports an explicit `ParquetFile` metadata object.
54
+ If a metadata object is supplied, the remote footer metadata
55
+ will not need to be transferred into local memory.
56
+ fs: AbstractFileSystem, optional
57
+ Filesystem object to use for opening the file. If nothing is
58
+ specified, an `AbstractFileSystem` object will be inferred.
59
+ engine : str, default "auto"
60
+ Parquet engine to use for metadata parsing. Allowed options
61
+ include "fastparquet", "pyarrow", and "auto". The specified
62
+ engine must be installed in the current environment. If
63
+ "auto" is specified, and both engines are installed,
64
+ "fastparquet" will take precedence over "pyarrow".
65
+ columns: list, optional
66
+ List of all column names that may be read from the file.
67
+ row_groups : list, optional
68
+ List of all row-groups that may be read from the file. This
69
+ may be a list of row-group indices (integers), or it may be
70
+ a list of `RowGroup` metadata objects (if the "fastparquet"
71
+ engine is used).
72
+ storage_options : dict, optional
73
+ Used to generate an `AbstractFileSystem` object if `fs` was
74
+ not specified.
75
+ strict : bool, optional
76
+ Whether the resulting `KnownPartsOfAFile` cache should
77
+ fetch reads that go beyond a known byte-range boundary.
78
+ If `False` (the default), any read that ends outside a
79
+ known part will be zero padded. Note that using
80
+ `strict=True` may be useful for debugging.
81
+ max_gap : int, optional
82
+ Neighboring byte ranges will only be merged when their
83
+ inter-range gap is <= `max_gap`. Default is 64KB.
84
+ max_block : int, optional
85
+ Neighboring byte ranges will only be merged when the size of
86
+ the aggregated range is <= `max_block`. Default is 256MB.
87
+ footer_sample_size : int, optional
88
+ Number of bytes to read from the end of the path to look
89
+ for the footer metadata. If the sampled bytes do not contain
90
+ the footer, a second read request will be required, and
91
+ performance will suffer. Default is 1MB.
92
+ **kwargs :
93
+ Optional key-word arguments to pass to `fs.open`
94
+ """
95
+
96
+ # Make sure we have an `AbstractFileSystem` object
97
+ # to work with
98
+ if fs is None:
99
+ fs = url_to_fs(path, **(storage_options or {}))[0]
100
+
101
+ # For now, `columns == []` not supported. Just use
102
+ # default `open` command with `path` input
103
+ if columns is not None and len(columns) == 0:
104
+ return fs.open(path, mode=mode)
105
+
106
+ # Set the engine
107
+ engine = _set_engine(engine)
108
+
109
+ # Fetch the known byte ranges needed to read
110
+ # `columns` and/or `row_groups`
111
+ data = _get_parquet_byte_ranges(
112
+ [path],
113
+ fs,
114
+ metadata=metadata,
115
+ columns=columns,
116
+ row_groups=row_groups,
117
+ engine=engine,
118
+ max_gap=max_gap,
119
+ max_block=max_block,
120
+ footer_sample_size=footer_sample_size,
121
+ )
122
+
123
+ # Extract file name from `data`
124
+ fn = next(iter(data)) if data else path
125
+
126
+ # Call self.open with "parts" caching
127
+ options = kwargs.pop("cache_options", {}).copy()
128
+ return fs.open(
129
+ fn,
130
+ mode=mode,
131
+ cache_type="parts",
132
+ cache_options={
133
+ **options,
134
+ "data": data.get(fn, {}),
135
+ "strict": strict,
136
+ },
137
+ **kwargs,
138
+ )
139
+
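A hedged usage sketch for `open_parquet_file`: the bucket/key below is hypothetical, and it assumes s3fs, pandas and at least one of fastparquet/pyarrow are installed. Only the byte ranges needed for the requested columns and row-group are transferred before pandas parses the file object:

import pandas as pd
from fsspec.parquet import open_parquet_file

with open_parquet_file(
    "s3://my-bucket/part.0.parquet",   # hypothetical remote path
    columns=["x", "y"],                # transfer only these column chunks
    row_groups=[0],                    # and only the first row-group
    storage_options={"anon": True},
) as f:
    df = pd.read_parquet(f, columns=["x", "y"])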
140
+
141
+ def _get_parquet_byte_ranges(
142
+ paths,
143
+ fs,
144
+ metadata=None,
145
+ columns=None,
146
+ row_groups=None,
147
+ max_gap=64_000,
148
+ max_block=256_000_000,
149
+ footer_sample_size=1_000_000,
150
+ engine="auto",
151
+ ):
152
+ """Get a dictionary of the known byte ranges needed
153
+ to read a specific column/row-group selection from a
154
+ Parquet dataset. Each value in the output dictionary
155
+ is intended for use as the `data` argument for the
156
+ `KnownPartsOfAFile` caching strategy of a single path.
157
+ """
158
+
159
+ # Set engine if necessary
160
+ if isinstance(engine, str):
161
+ engine = _set_engine(engine)
162
+
163
+ # Pass to specialized function if metadata is defined
164
+ if metadata is not None:
165
+ # Use the provided parquet metadata object
166
+ # to avoid transferring/parsing footer metadata
167
+ return _get_parquet_byte_ranges_from_metadata(
168
+ metadata,
169
+ fs,
170
+ engine,
171
+ columns=columns,
172
+ row_groups=row_groups,
173
+ max_gap=max_gap,
174
+ max_block=max_block,
175
+ )
176
+
177
+ # Get file sizes asynchronously
178
+ file_sizes = fs.sizes(paths)
179
+
180
+ # Populate global paths, starts, & ends
181
+ result = {}
182
+ data_paths = []
183
+ data_starts = []
184
+ data_ends = []
185
+ add_header_magic = True
186
+ if columns is None and row_groups is None:
187
+ # We are NOT selecting specific columns or row-groups.
188
+ #
189
+ # We can avoid sampling the footers, and just transfer
190
+ # all file data with cat_ranges
191
+ for i, path in enumerate(paths):
192
+ result[path] = {}
193
+ for b in range(0, file_sizes[i], max_block):
194
+ data_paths.append(path)
195
+ data_starts.append(b)
196
+ data_ends.append(min(b + max_block, file_sizes[i]))
197
+ add_header_magic = False # "Magic" should already be included
198
+ else:
199
+ # We ARE selecting specific columns or row-groups.
200
+ #
201
+ # Gather file footers.
202
+ # We just take the last `footer_sample_size` bytes of each
203
+ # file (or the entire file if it is smaller than that)
204
+ footer_starts = []
205
+ footer_ends = []
206
+ for i, path in enumerate(paths):
207
+ footer_ends.append(file_sizes[i])
208
+ sample_size = max(0, file_sizes[i] - footer_sample_size)
209
+ footer_starts.append(sample_size)
210
+ footer_samples = fs.cat_ranges(paths, footer_starts, footer_ends)
211
+
212
+ # Check our footer samples and re-sample if necessary.
213
+ missing_footer_starts = footer_starts.copy()
214
+ large_footer = 0
215
+ for i, path in enumerate(paths):
216
+ footer_size = int.from_bytes(footer_samples[i][-8:-4], "little")
217
+ real_footer_start = file_sizes[i] - (footer_size + 8)
218
+ if real_footer_start < footer_starts[i]:
219
+ missing_footer_starts[i] = real_footer_start
220
+ large_footer = max(large_footer, (footer_size + 8))
221
+ if large_footer:
222
+ warnings.warn(
223
+ f"Not enough data was used to sample the parquet footer. "
224
+ f"Try setting footer_sample_size >= {large_footer}."
225
+ )
226
+ for i, block in enumerate(
227
+ fs.cat_ranges(
228
+ paths,
229
+ missing_footer_starts,
230
+ footer_starts,
231
+ )
232
+ ):
233
+ footer_samples[i] = block + footer_samples[i]
234
+ footer_starts[i] = missing_footer_starts[i]
235
+
236
+ # Calculate required byte ranges for each path
237
+ for i, path in enumerate(paths):
238
+ # Deal with small-file case.
239
+ # Just include all remaining bytes of the file
240
+ # in a single range.
241
+ if file_sizes[i] < max_block:
242
+ if footer_starts[i] > 0:
243
+ # Only need to transfer the data if the
244
+ # footer sample isn't already the whole file
245
+ data_paths.append(path)
246
+ data_starts.append(0)
247
+ data_ends.append(footer_starts[i])
248
+ continue
249
+
250
+ # Use "engine" to collect data byte ranges
251
+ path_data_starts, path_data_ends = engine._parquet_byte_ranges(
252
+ columns,
253
+ row_groups=row_groups,
254
+ footer=footer_samples[i],
255
+ footer_start=footer_starts[i],
256
+ )
257
+
258
+ data_paths += [path] * len(path_data_starts)
259
+ data_starts += path_data_starts
260
+ data_ends += path_data_ends
261
+
262
+ # Merge adjacent offset ranges
263
+ data_paths, data_starts, data_ends = merge_offset_ranges(
264
+ data_paths,
265
+ data_starts,
266
+ data_ends,
267
+ max_gap=max_gap,
268
+ max_block=max_block,
269
+ sort=False, # Should already be sorted
270
+ )
271
+
272
+ # Start by populating `result` with footer samples
273
+ for i, path in enumerate(paths):
274
+ result[path] = {(footer_starts[i], footer_ends[i]): footer_samples[i]}
275
+
276
+ # Transfer the data byte-ranges into local memory
277
+ _transfer_ranges(fs, result, data_paths, data_starts, data_ends)
278
+
279
+ # Add b"PAR1" to header if necessary
280
+ if add_header_magic:
281
+ _add_header_magic(result)
282
+
283
+ return result
284
+
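The footer re-sampling above relies on the Parquet trailer layout: the last 8 bytes of a file are a 4-byte little-endian footer length followed by the b"PAR1" magic. A minimal sketch of the same arithmetic (the helper name is illustrative only):

def footer_byte_range(file_size: int, tail: bytes) -> tuple[int, int]:
    """Return (footer_start, file_size) given the trailing bytes of a parquet file."""
    assert tail[-4:] == b"PAR1", "not a parquet file"
    footer_size = int.from_bytes(tail[-8:-4], "little")  # length of the Thrift footer
    return file_size - (footer_size + 8), file_size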
285
+
286
+ def _get_parquet_byte_ranges_from_metadata(
287
+ metadata,
288
+ fs,
289
+ engine,
290
+ columns=None,
291
+ row_groups=None,
292
+ max_gap=64_000,
293
+ max_block=256_000_000,
294
+ ):
295
+ """Simplified version of `_get_parquet_byte_ranges` for
296
+ the case that an engine-specific `metadata` object is
297
+ provided, and the remote footer metadata does not need to
298
+ be transferred before calculating the required byte ranges.
299
+ """
300
+
301
+ # Use "engine" to collect data byte ranges
302
+ data_paths, data_starts, data_ends = engine._parquet_byte_ranges(
303
+ columns,
304
+ row_groups=row_groups,
305
+ metadata=metadata,
306
+ )
307
+
308
+ # Merge adjacent offset ranges
309
+ data_paths, data_starts, data_ends = merge_offset_ranges(
310
+ data_paths,
311
+ data_starts,
312
+ data_ends,
313
+ max_gap=max_gap,
314
+ max_block=max_block,
315
+ sort=False, # Should be sorted
316
+ )
317
+
318
+ # Transfer the data byte-ranges into local memory
319
+ result = {fn: {} for fn in list(set(data_paths))}
320
+ _transfer_ranges(fs, result, data_paths, data_starts, data_ends)
321
+
322
+ # Add b"PAR1" to header
323
+ _add_header_magic(result)
324
+
325
+ return result
326
+
327
+
328
+ def _transfer_ranges(fs, blocks, paths, starts, ends):
329
+ # Use cat_ranges to gather the data byte_ranges
330
+ ranges = (paths, starts, ends)
331
+ for path, start, stop, data in zip(*ranges, fs.cat_ranges(*ranges)):
332
+ blocks[path][(start, stop)] = data
333
+
334
+
335
+ def _add_header_magic(data):
336
+ # Add b"PAR1" to file headers
337
+ for path in list(data.keys()):
338
+ add_magic = True
339
+ for k in data[path]:
340
+ if k[0] == 0 and k[1] >= 4:
341
+ add_magic = False
342
+ break
343
+ if add_magic:
344
+ data[path][(0, 4)] = b"PAR1"
345
+
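`_transfer_ranges` and `_add_header_magic` together build a nested mapping of `{path: {(start, stop): bytes}}`; the inner dict is what `open_parquet_file` passes as `cache_options={"data": ...}` when opening the file with `cache_type="parts"`. An illustrative shape (path and offsets hypothetical):

result = {
    "bucket/part.0.parquet": {           # hypothetical path
        (0, 4): b"PAR1",                 # header magic inserted by _add_header_magic
        (4, 1_834_212): b"...",          # a merged column-chunk byte range
        (9_012_345, 9_120_000): b"...",  # the footer sample
    },
}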
346
+
347
+ def _set_engine(engine_str):
348
+ # Define a list of parquet engines to try
349
+ if engine_str == "auto":
350
+ try_engines = ("fastparquet", "pyarrow")
351
+ elif not isinstance(engine_str, str):
352
+ raise ValueError(
353
+ "Failed to set parquet engine! "
354
+ "Please pass 'fastparquet', 'pyarrow', or 'auto'"
355
+ )
356
+ elif engine_str not in ("fastparquet", "pyarrow"):
357
+ raise ValueError(f"{engine_str} engine not supported by `fsspec.parquet`")
358
+ else:
359
+ try_engines = [engine_str]
360
+
361
+ # Try importing the engines in `try_engines`,
362
+ # and choose the first one that succeeds
363
+ for engine in try_engines:
364
+ try:
365
+ if engine == "fastparquet":
366
+ return FastparquetEngine()
367
+ elif engine == "pyarrow":
368
+ return PyarrowEngine()
369
+ except ImportError:
370
+ pass
371
+
372
+ # Raise an error if a supported parquet engine
373
+ # was not found
374
+ raise ImportError(
375
+ f"The following parquet engines are not installed "
376
+ f"in your python environment: {try_engines}."
377
+ f"Please install 'fastparquert' or 'pyarrow' to "
378
+ f"utilize the `fsspec.parquet` module."
379
+ )
380
+
381
+
382
+ class FastparquetEngine:
383
+ # The purpose of the FastparquetEngine class is
384
+ # to check if fastparquet can be imported (on initialization)
385
+ # and to define a `_parquet_byte_ranges` method. In the
386
+ # future, this class may also be used to define other
387
+ # methods/logic that are specific to fastparquet.
388
+
389
+ def __init__(self):
390
+ import fastparquet as fp
391
+
392
+ self.fp = fp
393
+
394
+ def _row_group_filename(self, row_group, pf):
395
+ return pf.row_group_filename(row_group)
396
+
397
+ def _parquet_byte_ranges(
398
+ self,
399
+ columns,
400
+ row_groups=None,
401
+ metadata=None,
402
+ footer=None,
403
+ footer_start=None,
404
+ ):
405
+ # Initialize offset ranges and define ParquetFile metadata
406
+ pf = metadata
407
+ data_paths, data_starts, data_ends = [], [], []
408
+ if pf is None:
409
+ pf = self.fp.ParquetFile(io.BytesIO(footer))
410
+
411
+ # Convert columns to a set and add any index columns
412
+ # specified in the pandas metadata (just in case)
413
+ column_set = None if columns is None else set(columns)
414
+ if column_set is not None and hasattr(pf, "pandas_metadata"):
415
+ md_index = [
416
+ ind
417
+ for ind in pf.pandas_metadata.get("index_columns", [])
418
+ # Ignore RangeIndex information
419
+ if not isinstance(ind, dict)
420
+ ]
421
+ column_set |= set(md_index)
422
+
423
+ # Check if row_groups is a list of integers
424
+ # or a list of row-group metadata
425
+ if row_groups and not isinstance(row_groups[0], int):
426
+ # Input row_groups contains row-group metadata
427
+ row_group_indices = None
428
+ else:
429
+ # Input row_groups contains row-group indices
430
+ row_group_indices = row_groups
431
+ row_groups = pf.row_groups
432
+
433
+ # Loop through column chunks to add required byte ranges
434
+ for r, row_group in enumerate(row_groups):
435
+ # Skip this row-group if we are targeting
436
+ # specific row-groups
437
+ if row_group_indices is None or r in row_group_indices:
438
+ # Find the target parquet-file path for `row_group`
439
+ fn = self._row_group_filename(row_group, pf)
440
+
441
+ for column in row_group.columns:
442
+ name = column.meta_data.path_in_schema[0]
443
+ # Skip this column if we are targeting
444
+ # specific columns
445
+ if column_set is None or name in column_set:
446
+ file_offset0 = column.meta_data.dictionary_page_offset
447
+ if file_offset0 is None:
448
+ file_offset0 = column.meta_data.data_page_offset
449
+ num_bytes = column.meta_data.total_compressed_size
450
+ if footer_start is None or file_offset0 < footer_start:
451
+ data_paths.append(fn)
452
+ data_starts.append(file_offset0)
453
+ data_ends.append(
454
+ min(
455
+ file_offset0 + num_bytes,
456
+ footer_start or (file_offset0 + num_bytes),
457
+ )
458
+ )
459
+
460
+ if metadata:
461
+ # The metadata in this call may map to multiple
462
+ # file paths. Need to include `data_paths`
463
+ return data_paths, data_starts, data_ends
464
+ return data_starts, data_ends
465
+
466
+
467
+ class PyarrowEngine:
468
+ # The purpose of the PyarrowEngine class is
469
+ # to check if pyarrow can be imported (on initialization)
470
+ # and to define a `_parquet_byte_ranges` method. In the
471
+ # future, this class may also be used to define other
472
+ # methods/logic that are specific to pyarrow.
473
+
474
+ def __init__(self):
475
+ import pyarrow.parquet as pq
476
+
477
+ self.pq = pq
478
+
479
+ def _row_group_filename(self, row_group, metadata):
480
+ raise NotImplementedError
481
+
482
+ def _parquet_byte_ranges(
483
+ self,
484
+ columns,
485
+ row_groups=None,
486
+ metadata=None,
487
+ footer=None,
488
+ footer_start=None,
489
+ ):
490
+ if metadata is not None:
491
+ raise ValueError("metadata input not supported for PyarrowEngine")
492
+
493
+ data_starts, data_ends = [], []
494
+ md = self.pq.ParquetFile(io.BytesIO(footer)).metadata
495
+
496
+ # Convert columns to a set and add any index columns
497
+ # specified in the pandas metadata (just in case)
498
+ column_set = None if columns is None else set(columns)
499
+ if column_set is not None:
500
+ schema = md.schema.to_arrow_schema()
501
+ has_pandas_metadata = (
502
+ schema.metadata is not None and b"pandas" in schema.metadata
503
+ )
504
+ if has_pandas_metadata:
505
+ md_index = [
506
+ ind
507
+ for ind in json.loads(
508
+ schema.metadata[b"pandas"].decode("utf8")
509
+ ).get("index_columns", [])
510
+ # Ignore RangeIndex information
511
+ if not isinstance(ind, dict)
512
+ ]
513
+ column_set |= set(md_index)
514
+
515
+ # Loop through column chunks to add required byte ranges
516
+ for r in range(md.num_row_groups):
517
+ # Skip this row-group if we are targeting
518
+ # specific row-groups
519
+ if row_groups is None or r in row_groups:
520
+ row_group = md.row_group(r)
521
+ for c in range(row_group.num_columns):
522
+ column = row_group.column(c)
523
+ name = column.path_in_schema
524
+ # Skip this column if we are targeting
525
+ # specific columns
526
+ split_name = name.split(".")[0]
527
+ if (
528
+ column_set is None
529
+ or name in column_set
530
+ or split_name in column_set
531
+ ):
532
+ file_offset0 = column.dictionary_page_offset
533
+ if file_offset0 is None:
534
+ file_offset0 = column.data_page_offset
535
+ num_bytes = column.total_compressed_size
536
+ if file_offset0 < footer_start:
537
+ data_starts.append(file_offset0)
538
+ data_ends.append(
539
+ min(file_offset0 + num_bytes, footer_start)
540
+ )
541
+ return data_starts, data_ends
venv/lib/python3.13/site-packages/fsspec/registry.py ADDED
@@ -0,0 +1,330 @@
1
+ from __future__ import annotations
2
+
3
+ import importlib
4
+ import types
5
+ import warnings
6
+
7
+ __all__ = ["registry", "get_filesystem_class", "default"]
8
+
9
+ # internal, mutable
10
+ _registry: dict[str, type] = {}
11
+
12
+ # external, immutable
13
+ registry = types.MappingProxyType(_registry)
14
+ default = "file"
15
+
16
+
17
+ def register_implementation(name, cls, clobber=False, errtxt=None):
18
+ """Add implementation class to the registry
19
+
20
+ Parameters
21
+ ----------
22
+ name: str
23
+ Protocol name to associate with the class
24
+ cls: class or str
25
+ if a class: fsspec-compliant implementation class (normally inherits from
26
+ ``fsspec.AbstractFileSystem``, gets added straight to the registry. If a
27
+ str, the full path to an implementation class like package.module.class,
28
+ which gets added to known_implementations,
29
+ so the import is deferred until the filesystem is actually used.
30
+ clobber: bool (optional)
31
+ Whether to overwrite a protocol with the same name; if False, will raise
32
+ instead.
33
+ errtxt: str (optional)
34
+ If given, then a failure to import the given class will result in this
35
+ text being given.
36
+ """
37
+ if isinstance(cls, str):
38
+ if name in known_implementations and clobber is False:
39
+ if cls != known_implementations[name]["class"]:
40
+ raise ValueError(
41
+ f"Name ({name}) already in the known_implementations and clobber "
42
+ f"is False"
43
+ )
44
+ else:
45
+ known_implementations[name] = {
46
+ "class": cls,
47
+ "err": errtxt or f"{cls} import failed for protocol {name}",
48
+ }
49
+
50
+ else:
51
+ if name in registry and clobber is False:
52
+ if _registry[name] is not cls:
53
+ raise ValueError(
54
+ f"Name ({name}) already in the registry and clobber is False"
55
+ )
56
+ else:
57
+ _registry[name] = cls
58
+
59
+
60
+ # protocols mapped to the class which implements them. This dict can be
61
+ # updated with register_implementation
62
+ known_implementations = {
63
+ "abfs": {
64
+ "class": "adlfs.AzureBlobFileSystem",
65
+ "err": "Install adlfs to access Azure Datalake Gen2 and Azure Blob Storage",
66
+ },
67
+ "adl": {
68
+ "class": "adlfs.AzureDatalakeFileSystem",
69
+ "err": "Install adlfs to access Azure Datalake Gen1",
70
+ },
71
+ "arrow_hdfs": {
72
+ "class": "fsspec.implementations.arrow.HadoopFileSystem",
73
+ "err": "pyarrow and local java libraries required for HDFS",
74
+ },
75
+ "asynclocal": {
76
+ "class": "morefs.asyn_local.AsyncLocalFileSystem",
77
+ "err": "Install 'morefs[asynclocalfs]' to use AsyncLocalFileSystem",
78
+ },
79
+ "asyncwrapper": {
80
+ "class": "fsspec.implementations.asyn_wrapper.AsyncFileSystemWrapper",
81
+ },
82
+ "az": {
83
+ "class": "adlfs.AzureBlobFileSystem",
84
+ "err": "Install adlfs to access Azure Datalake Gen2 and Azure Blob Storage",
85
+ },
86
+ "blockcache": {"class": "fsspec.implementations.cached.CachingFileSystem"},
87
+ "box": {
88
+ "class": "boxfs.BoxFileSystem",
89
+ "err": "Please install boxfs to access BoxFileSystem",
90
+ },
91
+ "cached": {"class": "fsspec.implementations.cached.CachingFileSystem"},
92
+ "dask": {
93
+ "class": "fsspec.implementations.dask.DaskWorkerFileSystem",
94
+ "err": "Install dask distributed to access worker file system",
95
+ },
96
+ "data": {"class": "fsspec.implementations.data.DataFileSystem"},
97
+ "dbfs": {
98
+ "class": "fsspec.implementations.dbfs.DatabricksFileSystem",
99
+ "err": "Install the requests package to use the DatabricksFileSystem",
100
+ },
101
+ "dir": {"class": "fsspec.implementations.dirfs.DirFileSystem"},
102
+ "dropbox": {
103
+ "class": "dropboxdrivefs.DropboxDriveFileSystem",
104
+ "err": (
105
+ 'DropboxFileSystem requires "dropboxdrivefs","requests" and "'
106
+ '"dropbox" to be installed'
107
+ ),
108
+ },
109
+ "dvc": {
110
+ "class": "dvc.api.DVCFileSystem",
111
+ "err": "Install dvc to access DVCFileSystem",
112
+ },
113
+ "file": {"class": "fsspec.implementations.local.LocalFileSystem"},
114
+ "filecache": {"class": "fsspec.implementations.cached.WholeFileCacheFileSystem"},
115
+ "ftp": {"class": "fsspec.implementations.ftp.FTPFileSystem"},
116
+ "gcs": {
117
+ "class": "gcsfs.GCSFileSystem",
118
+ "err": "Please install gcsfs to access Google Storage",
119
+ },
120
+ "gdrive": {
121
+ "class": "gdrive_fsspec.GoogleDriveFileSystem",
122
+ "err": "Please install gdrive_fs for access to Google Drive",
123
+ },
124
+ "generic": {"class": "fsspec.generic.GenericFileSystem"},
125
+ "gist": {
126
+ "class": "fsspec.implementations.gist.GistFileSystem",
127
+ "err": "Install the requests package to use the gist FS",
128
+ },
129
+ "git": {
130
+ "class": "fsspec.implementations.git.GitFileSystem",
131
+ "err": "Install pygit2 to browse local git repos",
132
+ },
133
+ "github": {
134
+ "class": "fsspec.implementations.github.GithubFileSystem",
135
+ "err": "Install the requests package to use the github FS",
136
+ },
137
+ "gs": {
138
+ "class": "gcsfs.GCSFileSystem",
139
+ "err": "Please install gcsfs to access Google Storage",
140
+ },
141
+ "hdfs": {
142
+ "class": "fsspec.implementations.arrow.HadoopFileSystem",
143
+ "err": "pyarrow and local java libraries required for HDFS",
144
+ },
145
+ "hf": {
146
+ "class": "huggingface_hub.HfFileSystem",
147
+ "err": "Install huggingface_hub to access HfFileSystem",
148
+ },
149
+ "http": {
150
+ "class": "fsspec.implementations.http.HTTPFileSystem",
151
+ "err": 'HTTPFileSystem requires "requests" and "aiohttp" to be installed',
152
+ },
153
+ "https": {
154
+ "class": "fsspec.implementations.http.HTTPFileSystem",
155
+ "err": 'HTTPFileSystem requires "requests" and "aiohttp" to be installed',
156
+ },
157
+ "jlab": {
158
+ "class": "fsspec.implementations.jupyter.JupyterFileSystem",
159
+ "err": "Jupyter FS requires requests to be installed",
160
+ },
161
+ "jupyter": {
162
+ "class": "fsspec.implementations.jupyter.JupyterFileSystem",
163
+ "err": "Jupyter FS requires requests to be installed",
164
+ },
165
+ "lakefs": {
166
+ "class": "lakefs_spec.LakeFSFileSystem",
167
+ "err": "Please install lakefs-spec to access LakeFSFileSystem",
168
+ },
169
+ "libarchive": {
170
+ "class": "fsspec.implementations.libarchive.LibArchiveFileSystem",
171
+ "err": "LibArchive requires to be installed",
172
+ },
173
+ "local": {"class": "fsspec.implementations.local.LocalFileSystem"},
174
+ "memory": {"class": "fsspec.implementations.memory.MemoryFileSystem"},
175
+ "oci": {
176
+ "class": "ocifs.OCIFileSystem",
177
+ "err": "Install ocifs to access OCI Object Storage",
178
+ },
179
+ "ocilake": {
180
+ "class": "ocifs.OCIFileSystem",
181
+ "err": "Install ocifs to access OCI Data Lake",
182
+ },
183
+ "oss": {
184
+ "class": "ossfs.OSSFileSystem",
185
+ "err": "Install ossfs to access Alibaba Object Storage System",
186
+ },
187
+ "pyscript": {
188
+ "class": "pyscript_fsspec_client.client.PyscriptFileSystem",
189
+ "err": "Install requests (cpython) or run in pyscript",
190
+ },
191
+ "reference": {"class": "fsspec.implementations.reference.ReferenceFileSystem"},
192
+ "root": {
193
+ "class": "fsspec_xrootd.XRootDFileSystem",
194
+ "err": (
195
+ "Install fsspec-xrootd to access xrootd storage system. "
196
+ "Note: 'root' is the protocol name for xrootd storage systems, "
197
+ "not referring to root directories"
198
+ ),
199
+ },
200
+ "s3": {"class": "s3fs.S3FileSystem", "err": "Install s3fs to access S3"},
201
+ "s3a": {"class": "s3fs.S3FileSystem", "err": "Install s3fs to access S3"},
202
+ "sftp": {
203
+ "class": "fsspec.implementations.sftp.SFTPFileSystem",
204
+ "err": 'SFTPFileSystem requires "paramiko" to be installed',
205
+ },
206
+ "simplecache": {"class": "fsspec.implementations.cached.SimpleCacheFileSystem"},
207
+ "smb": {
208
+ "class": "fsspec.implementations.smb.SMBFileSystem",
209
+ "err": 'SMB requires "smbprotocol" or "smbprotocol[kerberos]" installed',
210
+ },
211
+ "ssh": {
212
+ "class": "fsspec.implementations.sftp.SFTPFileSystem",
213
+ "err": 'SFTPFileSystem requires "paramiko" to be installed',
214
+ },
215
+ "tar": {"class": "fsspec.implementations.tar.TarFileSystem"},
216
+ "tos": {
217
+ "class": "tosfs.TosFileSystem",
218
+ "err": "Install tosfs to access ByteDance volcano engine Tinder Object Storage",
219
+ },
220
+ "tosfs": {
221
+ "class": "tosfs.TosFileSystem",
222
+ "err": "Install tosfs to access ByteDance volcano engine Tinder Object Storage",
223
+ },
224
+ "wandb": {"class": "wandbfs.WandbFS", "err": "Install wandbfs to access wandb"},
225
+ "webdav": {
226
+ "class": "webdav4.fsspec.WebdavFileSystem",
227
+ "err": "Install webdav4 to access WebDAV",
228
+ },
229
+ "webhdfs": {
230
+ "class": "fsspec.implementations.webhdfs.WebHDFS",
231
+ "err": 'webHDFS access requires "requests" to be installed',
232
+ },
233
+ "zip": {"class": "fsspec.implementations.zip.ZipFileSystem"},
234
+ }
235
+
236
+ assert list(known_implementations) == sorted(known_implementations), (
237
+ "Not in alphabetical order"
238
+ )
239
+
240
+
241
+ def get_filesystem_class(protocol):
242
+ """Fetch named protocol implementation from the registry
243
+
244
+ The dict ``known_implementations`` maps protocol names to the locations
245
+ of classes implementing the corresponding file-system. When used for the
246
+ first time, appropriate imports will happen and the class will be placed in
247
+ the registry. All subsequent calls will fetch directly from the registry.
248
+
249
+ Some protocol implementations require additional dependencies, and so the
250
+ import may fail. In this case, the string in the "err" field of the
251
+ ``known_implementations`` will be given as the error message.
252
+ """
253
+ if not protocol:
254
+ protocol = default
255
+
256
+ if protocol not in registry:
257
+ if protocol not in known_implementations:
258
+ raise ValueError(f"Protocol not known: {protocol}")
259
+ bit = known_implementations[protocol]
260
+ try:
261
+ register_implementation(protocol, _import_class(bit["class"]))
262
+ except ImportError as e:
263
+ raise ImportError(bit.get("err")) from e
264
+ cls = registry[protocol]
265
+ if getattr(cls, "protocol", None) in ("abstract", None):
266
+ cls.protocol = protocol
267
+
268
+ return cls
269
+
270
+
271
+ s3_msg = """Your installed version of s3fs is very old and known to cause
272
+ severe performance issues, see also https://github.com/dask/dask/issues/10276
273
+
274
+ To fix, you should specify a lower version bound on s3fs, or
275
+ update the current installation.
276
+ """
277
+
278
+
279
+ def _import_class(fqp: str):
280
+ """Take a fully-qualified path and return the imported class or identifier.
281
+
282
+ ``fqp`` is of the form "package.module.klass" or
283
+ "package.module:subobject.klass".
284
+
285
+ Warnings
286
+ --------
287
+ This can import arbitrary modules. Make sure you haven't installed any modules
288
+ that may execute malicious code at import time.
289
+ """
290
+ if ":" in fqp:
291
+ mod, name = fqp.rsplit(":", 1)
292
+ else:
293
+ mod, name = fqp.rsplit(".", 1)
294
+
295
+ is_s3 = mod == "s3fs"
296
+ mod = importlib.import_module(mod)
297
+ if is_s3 and mod.__version__.split(".") < ["0", "5"]:
298
+ warnings.warn(s3_msg)
299
+ for part in name.split("."):
300
+ mod = getattr(mod, part)
301
+
302
+ if not isinstance(mod, type):
303
+ raise TypeError(f"{fqp} is not a class")
304
+
305
+ return mod
306
+
307
+
308
+ def filesystem(protocol, **storage_options):
309
+ """Instantiate filesystems for given protocol and arguments
310
+
311
+ ``storage_options`` are specific to the protocol being chosen, and are
312
+ passed directly to the class.
313
+ """
314
+ if protocol == "arrow_hdfs":
315
+ warnings.warn(
316
+ "The 'arrow_hdfs' protocol has been deprecated and will be "
317
+ "removed in the future. Specify it as 'hdfs'.",
318
+ DeprecationWarning,
319
+ )
320
+
321
+ cls = get_filesystem_class(protocol)
322
+ return cls(**storage_options)
323
+
324
+
325
+ def available_protocols():
326
+ """Return a list of the implemented protocols.
327
+
328
+ Note that any given protocol may require extra packages to be importable.
329
+ """
330
+ return list(known_implementations)
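A minimal sketch of the module-level helpers, using protocols bundled with fsspec itself:

from fsspec.registry import available_protocols, filesystem, get_filesystem_class

cls = get_filesystem_class("memory")     # imports and caches the implementation
fs = filesystem("memory")                # roughly equivalent to cls()
print(cls.__name__)                      # MemoryFileSystem
print("file" in available_protocols())   # True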
venv/lib/python3.13/site-packages/fsspec/spec.py ADDED
@@ -0,0 +1,2270 @@
1
+ from __future__ import annotations
2
+
3
+ import io
4
+ import json
5
+ import logging
6
+ import os
7
+ import threading
8
+ import warnings
9
+ import weakref
10
+ from errno import ESPIPE
11
+ from glob import has_magic
12
+ from hashlib import sha256
13
+ from typing import Any, ClassVar
14
+
15
+ from .callbacks import DEFAULT_CALLBACK
16
+ from .config import apply_config, conf
17
+ from .dircache import DirCache
18
+ from .transaction import Transaction
19
+ from .utils import (
20
+ _unstrip_protocol,
21
+ glob_translate,
22
+ isfilelike,
23
+ other_paths,
24
+ read_block,
25
+ stringify_path,
26
+ tokenize,
27
+ )
28
+
29
+ logger = logging.getLogger("fsspec")
30
+
31
+
32
+ def make_instance(cls, args, kwargs):
33
+ return cls(*args, **kwargs)
34
+
35
+
36
+ class _Cached(type):
37
+ """
38
+ Metaclass for caching file system instances.
39
+
40
+ Notes
41
+ -----
42
+ Instances are cached according to
43
+
44
+ * The values of the class attributes listed in `_extra_tokenize_attributes`
45
+ * The arguments passed to ``__init__``.
46
+
47
+ This creates an additional reference to the filesystem, which prevents the
48
+ filesystem from being garbage collected when all *user* references go away.
49
+ A call to the :meth:`AbstractFileSystem.clear_instance_cache` must *also*
50
+ be made for a filesystem instance to be garbage collected.
51
+ """
52
+
53
+ def __init__(cls, *args, **kwargs):
54
+ super().__init__(*args, **kwargs)
55
+ # Note: we intentionally create a reference here, to avoid garbage
56
+ # collecting instances when all other references are gone. To really
57
+ # delete a FileSystem, the cache must be cleared.
58
+ if conf.get("weakref_instance_cache"): # pragma: no cover
59
+ # debug option for analysing fork/spawn conditions
60
+ cls._cache = weakref.WeakValueDictionary()
61
+ else:
62
+ cls._cache = {}
63
+ cls._pid = os.getpid()
64
+
65
+ def __call__(cls, *args, **kwargs):
66
+ kwargs = apply_config(cls, kwargs)
67
+ extra_tokens = tuple(
68
+ getattr(cls, attr, None) for attr in cls._extra_tokenize_attributes
69
+ )
70
+ token = tokenize(
71
+ cls, cls._pid, threading.get_ident(), *args, *extra_tokens, **kwargs
72
+ )
73
+ skip = kwargs.pop("skip_instance_cache", False)
74
+ if os.getpid() != cls._pid:
75
+ cls._cache.clear()
76
+ cls._pid = os.getpid()
77
+ if not skip and cls.cachable and token in cls._cache:
78
+ cls._latest = token
79
+ return cls._cache[token]
80
+ else:
81
+ obj = super().__call__(*args, **kwargs)
82
+ # Setting _fs_token here causes some static linters to complain.
83
+ obj._fs_token_ = token
84
+ obj.storage_args = args
85
+ obj.storage_options = kwargs
86
+ if obj.async_impl and obj.mirror_sync_methods:
87
+ from .asyn import mirror_sync_methods
88
+
89
+ mirror_sync_methods(obj)
90
+
91
+ if cls.cachable and not skip:
92
+ cls._latest = token
93
+ cls._cache[token] = obj
94
+ return obj
95
+
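Because of this metaclass, constructing a filesystem twice with equivalent arguments returns the same object unless `skip_instance_cache=True` is passed; a brief sketch:

import fsspec

a = fsspec.filesystem("memory")
b = fsspec.filesystem("memory")
c = fsspec.filesystem("memory", skip_instance_cache=True)

print(a is b)   # True  -- same token, instance reused from cls._cache
print(a is c)   # False -- cache bypassed, and this instance is not stored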
96
+
97
+ class AbstractFileSystem(metaclass=_Cached):
98
+ """
99
+ An abstract super-class for pythonic file-systems
100
+
101
+ Implementations are expected to be compatible with or, better, subclass
102
+ from here.
103
+ """
104
+
105
+ cachable = True # this class can be cached, instances reused
106
+ _cached = False
107
+ blocksize = 2**22
108
+ sep = "/"
109
+ protocol: ClassVar[str | tuple[str, ...]] = "abstract"
110
+ _latest = None
111
+ async_impl = False
112
+ mirror_sync_methods = False
113
+ root_marker = "" # For some FSs, may require leading '/' or other character
114
+ transaction_type = Transaction
115
+
116
+ #: Extra *class attributes* that should be considered when hashing.
117
+ _extra_tokenize_attributes = ()
118
+
119
+ # Set by _Cached metaclass
120
+ storage_args: tuple[Any, ...]
121
+ storage_options: dict[str, Any]
122
+
123
+ def __init__(self, *args, **storage_options):
124
+ """Create and configure file-system instance
125
+
126
+ Instances may be cachable, so if similar enough arguments are seen
127
+ a new instance is not required. The token attribute exists to allow
128
+ implementations to cache instances if they wish.
129
+
130
+ A reasonable default should be provided if there are no arguments.
131
+
132
+ Subclasses should call this method.
133
+
134
+ Parameters
135
+ ----------
136
+ use_listings_cache, listings_expiry_time, max_paths:
137
+ passed to ``DirCache``, if the implementation supports
138
+ directory listing caching. Pass use_listings_cache=False
139
+ to disable such caching.
140
+ skip_instance_cache: bool
141
+ If this is a cachable implementation, pass True here to force
142
+ creating a new instance even if a matching instance exists, and prevent
143
+ storing this instance.
144
+ asynchronous: bool
145
+ loop: asyncio-compatible IOLoop or None
146
+ """
147
+ if self._cached:
148
+ # reusing instance, don't change
149
+ return
150
+ self._cached = True
151
+ self._intrans = False
152
+ self._transaction = None
153
+ self._invalidated_caches_in_transaction = []
154
+ self.dircache = DirCache(**storage_options)
155
+
156
+ if storage_options.pop("add_docs", None):
157
+ warnings.warn("add_docs is no longer supported.", FutureWarning)
158
+
159
+ if storage_options.pop("add_aliases", None):
160
+ warnings.warn("add_aliases has been removed.", FutureWarning)
161
+ # This is set in _Cached
162
+ self._fs_token_ = None
163
+
164
+ @property
165
+ def fsid(self):
166
+ """Persistent filesystem id that can be used to compare filesystems
167
+ across sessions.
168
+ """
169
+ raise NotImplementedError
170
+
171
+ @property
172
+ def _fs_token(self):
173
+ return self._fs_token_
174
+
175
+ def __dask_tokenize__(self):
176
+ return self._fs_token
177
+
178
+ def __hash__(self):
179
+ return int(self._fs_token, 16)
180
+
181
+ def __eq__(self, other):
182
+ return isinstance(other, type(self)) and self._fs_token == other._fs_token
183
+
184
+ def __reduce__(self):
185
+ return make_instance, (type(self), self.storage_args, self.storage_options)
186
+
187
+ @classmethod
188
+ def _strip_protocol(cls, path):
189
+ """Turn path from fully-qualified to file-system-specific
190
+
191
+ May require FS-specific handling, e.g., for relative paths or links.
192
+ """
193
+ if isinstance(path, list):
194
+ return [cls._strip_protocol(p) for p in path]
195
+ path = stringify_path(path)
196
+ protos = (cls.protocol,) if isinstance(cls.protocol, str) else cls.protocol
197
+ for protocol in protos:
198
+ if path.startswith(protocol + "://"):
199
+ path = path[len(protocol) + 3 :]
200
+ elif path.startswith(protocol + "::"):
201
+ path = path[len(protocol) + 2 :]
202
+ path = path.rstrip("/")
203
+ # use of root_marker to make minimum required path, e.g., "/"
204
+ return path or cls.root_marker
205
+
206
+ def unstrip_protocol(self, name: str) -> str:
207
+ """Format FS-specific path to generic, including protocol"""
208
+ protos = (self.protocol,) if isinstance(self.protocol, str) else self.protocol
209
+ for protocol in protos:
210
+ if name.startswith(f"{protocol}://"):
211
+ return name
212
+ return f"{protos[0]}://{name}"
213
+
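A sketch of the default strip/unstrip behaviour for a made-up protocol (real implementations often override `_strip_protocol`, e.g. to keep a leading "/" via `root_marker`):

import fsspec

class ToyFileSystem(fsspec.AbstractFileSystem):
    protocol = "toy"

print(ToyFileSystem._strip_protocol("toy://bucket/key/"))  # bucket/key
print(ToyFileSystem().unstrip_protocol("bucket/key"))      # toy://bucket/key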
214
+ @staticmethod
215
+ def _get_kwargs_from_urls(path):
216
+ """If kwargs can be encoded in the paths, extract them here
217
+
218
+ This should happen before instantiation of the class; incoming paths
219
+ then should be amended to strip the options in methods.
220
+
221
+ Examples may look like an sftp path "sftp://user@host:/my/path", where
222
+ the user and host should become kwargs and later get stripped.
223
+ """
224
+ # by default, nothing happens
225
+ return {}
226
+
227
+ @classmethod
228
+ def current(cls):
229
+ """Return the most recently instantiated FileSystem
230
+
231
+ If no instance has been created, then create one with defaults
232
+ """
233
+ if cls._latest in cls._cache:
234
+ return cls._cache[cls._latest]
235
+ return cls()
236
+
237
+ @property
238
+ def transaction(self):
239
+ """A context within which files are committed together upon exit
240
+
241
+ Requires the file class to implement `.commit()` and `.discard()`
242
+ for the normal and exception cases.
243
+ """
244
+ if self._transaction is None:
245
+ self._transaction = self.transaction_type(self)
246
+ return self._transaction
247
+
248
+ def start_transaction(self):
249
+ """Begin write transaction for deferring files, non-context version"""
250
+ self._intrans = True
251
+ self._transaction = self.transaction_type(self)
252
+ return self.transaction
253
+
254
+ def end_transaction(self):
255
+ """Finish write transaction, non-context version"""
256
+ self.transaction.complete()
257
+ self._transaction = None
258
+ # The invalid cache must be cleared after the transaction is completed.
259
+ for path in self._invalidated_caches_in_transaction:
260
+ self.invalidate_cache(path)
261
+ self._invalidated_caches_in_transaction.clear()
262
+
263
+ def invalidate_cache(self, path=None):
264
+ """
265
+ Discard any cached directory information
266
+
267
+ Parameters
268
+ ----------
269
+ path: string or None
270
+ If None, clear all cached listings; otherwise clear listings at or under the given
271
+ path.
272
+ """
273
+ # Not necessary to implement invalidation mechanism, may have no cache.
274
+ # But if have, you should call this method of parent class from your
275
+ # subclass to ensure expiring caches after transacations correctly.
276
+ # See the implementation of FTPFileSystem in ftp.py
277
+ if self._intrans:
278
+ self._invalidated_caches_in_transaction.append(path)
279
+
280
+ def mkdir(self, path, create_parents=True, **kwargs):
281
+ """
282
+ Create directory entry at path
283
+
284
+ For systems that don't have true directories, may create an entry for
285
+ this instance only and not touch the real filesystem
286
+
287
+ Parameters
288
+ ----------
289
+ path: str
290
+ location
291
+ create_parents: bool
292
+ if True, this is equivalent to ``makedirs``
293
+ kwargs:
294
+ may be permissions, etc.
295
+ """
296
+ pass # not necessary to implement, may not have directories
297
+
298
+ def makedirs(self, path, exist_ok=False):
299
+ """Recursively make directories
300
+
301
+ Creates directory at path and any intervening required directories.
302
+ Raises exception if, for instance, the path already exists but is a
303
+ file.
304
+
305
+ Parameters
306
+ ----------
307
+ path: str
308
+ leaf directory name
309
+ exist_ok: bool (False)
310
+ If False, will error if the target already exists
311
+ """
312
+ pass # not necessary to implement, may not have directories
313
+
314
+ def rmdir(self, path):
315
+ """Remove a directory, if empty"""
316
+ pass # not necessary to implement, may not have directories
317
+
318
+ def ls(self, path, detail=True, **kwargs):
319
+ """List objects at path.
320
+
321
+ This should include subdirectories and files at that location. The
322
+ difference between a file and a directory must be clear when details
323
+ are requested.
324
+
325
+ The specific keys, or perhaps a FileInfo class, or similar, is TBD,
326
+ but must be consistent across implementations.
327
+ Must include:
328
+
329
+ - full path to the entry (without protocol)
330
+ - size of the entry, in bytes. If the value cannot be determined, will
331
+ be ``None``.
332
+ - type of entry, "file", "directory" or other
333
+
334
+ Additional information
335
+ may be present, appropriate to the file-system, e.g., generation,
336
+ checksum, etc.
337
+
338
+ May use refresh=True|False to allow use of self._ls_from_cache to
339
+ check for a saved listing and avoid calling the backend. This would be
340
+ common where listing may be expensive.
341
+
342
+ Parameters
343
+ ----------
344
+ path: str
345
+ detail: bool
346
+ if True, gives a list of dictionaries, where each is the same as
347
+ the result of ``info(path)``. If False, gives a list of paths
348
+ (str).
349
+ kwargs: may have additional backend-specific options, such as version
350
+ information
351
+
352
+ Returns
353
+ -------
354
+ List of strings if detail is False, or list of directory information
355
+ dicts if detail is True.
356
+ """
357
+ raise NotImplementedError
358
+
359
+ def _ls_from_cache(self, path):
360
+ """Check cache for listing
361
+
362
+ Returns listing, if found (may be an empty list for a directory that exists
363
+ but contains nothing), None if not in cache.
364
+ """
365
+ parent = self._parent(path)
366
+ try:
367
+ return self.dircache[path.rstrip("/")]
368
+ except KeyError:
369
+ pass
370
+ try:
371
+ files = [
372
+ f
373
+ for f in self.dircache[parent]
374
+ if f["name"] == path
375
+ or (f["name"] == path.rstrip("/") and f["type"] == "directory")
376
+ ]
377
+ if len(files) == 0:
378
+ # parent dir was listed but did not contain this file
379
+ raise FileNotFoundError(path)
380
+ return files
381
+ except KeyError:
382
+ pass
383
+
384
+ def walk(self, path, maxdepth=None, topdown=True, on_error="omit", **kwargs):
385
+ """Return all files under the given path.
386
+
387
+ List all files, recursing into subdirectories; output is iterator-style,
388
+ like ``os.walk()``. For a simple list of files, ``find()`` is available.
389
+
390
+ When topdown is True, the caller can modify the dirnames list in-place (perhaps
391
+ using del or slice assignment), and walk() will
392
+ only recurse into the subdirectories whose names remain in dirnames;
393
+ this can be used to prune the search, impose a specific order of visiting,
394
+ or even to inform walk() about directories the caller creates or renames before
395
+ it resumes walk() again.
396
+ Modifying dirnames when topdown is False has no effect. (see os.walk)
397
+
398
+ Note that the "files" outputted will include anything that is not
399
+ a directory, such as links.
400
+
401
+ Parameters
402
+ ----------
403
+ path: str
404
+ Root to recurse into
405
+ maxdepth: int
406
+ Maximum recursion depth. None means limitless, but not recommended
407
+ on link-based file-systems.
408
+ topdown: bool (True)
409
+ Whether to walk the directory tree from the top downwards or from
410
+ the bottom upwards.
411
+ on_error: "omit", "raise", a callable
412
+ if omit (default), path with exception will simply be empty;
413
+ If raise, an underlying exception will be raised;
414
+ if callable, it will be called with a single OSError instance as argument
415
+ kwargs: passed to ``ls``
416
+ """
417
+ if maxdepth is not None and maxdepth < 1:
418
+ raise ValueError("maxdepth must be at least 1")
419
+
420
+ path = self._strip_protocol(path)
421
+ full_dirs = {}
422
+ dirs = {}
423
+ files = {}
424
+
425
+ detail = kwargs.pop("detail", False)
426
+ try:
427
+ listing = self.ls(path, detail=True, **kwargs)
428
+ except (FileNotFoundError, OSError) as e:
429
+ if on_error == "raise":
430
+ raise
431
+ if callable(on_error):
432
+ on_error(e)
433
+ return
434
+
435
+ for info in listing:
436
+ # each info name must be at least [path]/part , but here
437
+ # we check also for names like [path]/part/
438
+ pathname = info["name"].rstrip("/")
439
+ name = pathname.rsplit("/", 1)[-1]
440
+ if info["type"] == "directory" and pathname != path:
441
+ # do not include "self" path
442
+ full_dirs[name] = pathname
443
+ dirs[name] = info
444
+ elif pathname == path:
445
+ # file-like with same name as given path
446
+ files[""] = info
447
+ else:
448
+ files[name] = info
449
+
450
+ if not detail:
451
+ dirs = list(dirs)
452
+ files = list(files)
453
+
454
+ if topdown:
455
+ # Yield before recursion if walking top down
456
+ yield path, dirs, files
457
+
458
+ if maxdepth is not None:
459
+ maxdepth -= 1
460
+ if maxdepth < 1:
461
+ if not topdown:
462
+ yield path, dirs, files
463
+ return
464
+
465
+ for d in dirs:
466
+ yield from self.walk(
467
+ full_dirs[d],
468
+ maxdepth=maxdepth,
469
+ detail=detail,
470
+ topdown=topdown,
471
+ **kwargs,
472
+ )
473
+
474
+ if not topdown:
475
+ # Yield after recursion if walking bottom up
476
+ yield path, dirs, files
477
+
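A self-contained sketch of `walk` over the in-memory filesystem (output shown approximately):

import fsspec

fs = fsspec.filesystem("memory")
fs.makedirs("/proj/sub", exist_ok=True)
fs.pipe_file("/proj/a.txt", b"A")
fs.pipe_file("/proj/sub/b.txt", b"BB")

for root, dirs, files in fs.walk("/proj", topdown=True):
    print(root, dirs, files)
# /proj ['sub'] ['a.txt']
# /proj/sub [] ['b.txt']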
478
+ def find(self, path, maxdepth=None, withdirs=False, detail=False, **kwargs):
479
+ """List all files below path.
480
+
481
+ Like posix ``find`` command without conditions
482
+
483
+ Parameters
484
+ ----------
485
+ path : str
486
+ maxdepth: int or None
487
+ If not None, the maximum number of levels to descend
488
+ withdirs: bool
489
+ Whether to include directory paths in the output. This is True
490
+ when used by glob, but users usually only want files.
491
+ kwargs are passed to ``ls``.
492
+ """
493
+ # TODO: allow equivalent of -name parameter
494
+ path = self._strip_protocol(path)
495
+ out = {}
496
+
497
+ # Add the root directory if withdirs is requested
498
+ # This is needed for posix glob compliance
499
+ if withdirs and path != "" and self.isdir(path):
500
+ out[path] = self.info(path)
501
+
502
+ for _, dirs, files in self.walk(path, maxdepth, detail=True, **kwargs):
503
+ if withdirs:
504
+ files.update(dirs)
505
+ out.update({info["name"]: info for name, info in files.items()})
506
+ if not out and self.isfile(path):
507
+ # walk works on directories, but find should also return [path]
508
+ # when path happens to be a file
509
+ out[path] = {}
510
+ names = sorted(out)
511
+ if not detail:
512
+ return names
513
+ else:
514
+ return {name: out[name] for name in names}
515
+
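Continuing the in-memory sketch from `walk` above: `find` flattens the tree into file paths and `du` sums their sizes (values shown approximately):

print(fs.find("/proj"))            # ['/proj/a.txt', '/proj/sub/b.txt']
print(fs.du("/proj"))              # 3  (1 + 2 bytes; total=True by default)
print(fs.du("/proj", total=False)) # {'/proj/a.txt': 1, '/proj/sub/b.txt': 2}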
516
+ def du(self, path, total=True, maxdepth=None, withdirs=False, **kwargs):
517
+ """Space used by files and optionally directories within a path
518
+
519
+ Directory size does not include the size of its contents.
520
+
521
+ Parameters
522
+ ----------
523
+ path: str
524
+ total: bool
525
+ Whether to sum all the file sizes
526
+ maxdepth: int or None
527
+ Maximum number of directory levels to descend, None for unlimited.
528
+ withdirs: bool
529
+ Whether to include directory paths in the output.
530
+ kwargs: passed to ``find``
531
+
532
+ Returns
533
+ -------
534
+ Dict of {path: size} if total=False, or int otherwise, where numbers
535
+ refer to bytes used.
536
+ """
537
+ sizes = {}
538
+ if withdirs and self.isdir(path):
539
+ # Include top-level directory in output
540
+ info = self.info(path)
541
+ sizes[info["name"]] = info["size"]
542
+ for f in self.find(path, maxdepth=maxdepth, withdirs=withdirs, **kwargs):
543
+ info = self.info(f)
544
+ sizes[info["name"]] = info["size"]
545
+ if total:
546
+ return sum(sizes.values())
547
+ else:
548
+ return sizes
549
+
550
+ def glob(self, path, maxdepth=None, **kwargs):
551
+ """Find files by glob-matching.
552
+
553
+ Pattern matching capabilities for finding files that match the given pattern.
554
+
555
+ Parameters
556
+ ----------
557
+ path: str
558
+ The glob pattern to match against
559
+ maxdepth: int or None
560
+ Maximum depth for ``'**'`` patterns. Applied on the first ``'**'`` found.
561
+ Must be at least 1 if provided.
562
+ kwargs:
563
+ Additional arguments passed to ``find`` (e.g., detail=True)
564
+
565
+ Returns
566
+ -------
567
+ List of matched paths, or dict of paths and their info if detail=True
568
+
569
+ Notes
570
+ -----
571
+ Supported patterns:
572
+ - '*': Matches any sequence of characters within a single directory level
573
+ - ``'**'``: Matches any number of directory levels (must be an entire path component)
574
+ - '?': Matches exactly one character
575
+ - '[abc]': Matches any character in the set
576
+ - '[a-z]': Matches any character in the range
577
+ - '[!abc]': Matches any character NOT in the set
578
+
579
+ Special behaviors:
580
+ - If the path ends with '/', only folders are returned
581
+ - Consecutive '*' characters are compressed into a single '*'
582
+ - Empty brackets '[]' never match anything
583
+ - Negated empty brackets '[!]' match any single character
584
+ - Special characters in character classes are escaped properly
585
+
586
+ Limitations:
587
+ - ``'**'`` must be a complete path component (e.g., ``'a/**/b'``, not ``'a**b'``)
588
+ - No brace expansion ('{a,b}.txt')
589
+ - No extended glob patterns ('+(pattern)', '!(pattern)')
590
+ """
591
+ if maxdepth is not None and maxdepth < 1:
592
+ raise ValueError("maxdepth must be at least 1")
593
+
594
+ import re
595
+
596
+ seps = (os.path.sep, os.path.altsep) if os.path.altsep else (os.path.sep,)
597
+ ends_with_sep = path.endswith(seps) # _strip_protocol strips trailing slash
598
+ path = self._strip_protocol(path)
599
+ append_slash_to_dirname = ends_with_sep or path.endswith(
600
+ tuple(sep + "**" for sep in seps)
601
+ )
602
+ idx_star = path.find("*") if path.find("*") >= 0 else len(path)
603
+ idx_qmark = path.find("?") if path.find("?") >= 0 else len(path)
604
+ idx_brace = path.find("[") if path.find("[") >= 0 else len(path)
605
+
606
+ min_idx = min(idx_star, idx_qmark, idx_brace)
607
+
608
+ detail = kwargs.pop("detail", False)
609
+
610
+ if not has_magic(path):
611
+ if self.exists(path, **kwargs):
612
+ if not detail:
613
+ return [path]
614
+ else:
615
+ return {path: self.info(path, **kwargs)}
616
+ else:
617
+ if not detail:
618
+ return [] # glob of non-existent returns empty
619
+ else:
620
+ return {}
621
+ elif "/" in path[:min_idx]:
622
+ min_idx = path[:min_idx].rindex("/")
623
+ root = path[: min_idx + 1]
624
+ depth = path[min_idx + 1 :].count("/") + 1
625
+ else:
626
+ root = ""
627
+ depth = path[min_idx + 1 :].count("/") + 1
628
+
629
+ if "**" in path:
630
+ if maxdepth is not None:
631
+ idx_double_stars = path.find("**")
632
+ depth_double_stars = path[idx_double_stars:].count("/") + 1
633
+ depth = depth - depth_double_stars + maxdepth
634
+ else:
635
+ depth = None
636
+
637
+ allpaths = self.find(root, maxdepth=depth, withdirs=True, detail=True, **kwargs)
638
+
639
+ pattern = glob_translate(path + ("/" if ends_with_sep else ""))
640
+ pattern = re.compile(pattern)
641
+
642
+ out = {
643
+ p: info
644
+ for p, info in sorted(allpaths.items())
645
+ if pattern.match(
646
+ p + "/"
647
+ if append_slash_to_dirname and info["type"] == "directory"
648
+ else p
649
+ )
650
+ }
651
+
652
+ if detail:
653
+ return out
654
+ else:
655
+ return list(out)
656
+
657
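As a quick illustration of the pattern rules documented in the ``glob`` docstring above, here is a short sketch against fsspec's in-memory backend (illustrative only, not part of the committed file):

import fsspec

fs = fsspec.filesystem("memory")
for p in ["/proj/a.csv", "/proj/b.txt", "/proj/deep/c.csv"]:
    fs.pipe_file(p, b"")

print(fs.glob("/proj/*.csv"))      # single level only: ['/proj/a.csv']
print(fs.glob("/proj/**/*.csv"))   # '**' spans directory levels: a.csv and deep/c.csv
print(fs.glob("/proj/[ab].*"))     # character class applied to the basename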
+ def exists(self, path, **kwargs):
658
+ """Is there a file at the given path"""
659
+ try:
660
+ self.info(path, **kwargs)
661
+ return True
662
+ except: # noqa: E722
663
+ # any exception allowed bar FileNotFoundError?
664
+ return False
665
+
666
+ def lexists(self, path, **kwargs):
667
+ """If there is a file at the given path (including
668
+ broken links)"""
669
+ return self.exists(path)
670
+
671
+ def info(self, path, **kwargs):
672
+ """Give details of entry at path
673
+
674
+ Returns a single dictionary, with exactly the same information as ``ls``
675
+ would with ``detail=True``.
676
+
677
+ The default implementation calls ls and could be overridden by a
678
+ shortcut. kwargs are passed on to ``ls()``.
679
+
680
+ Some file systems might not be able to measure the file's size, in
681
+ which case, the returned dict will include ``'size': None``.
682
+
683
+ Returns
684
+ -------
685
+ dict with keys: name (full path in the FS), size (in bytes), type (file,
686
+ directory, or something else) and other FS-specific keys.
687
+ """
688
+ path = self._strip_protocol(path)
689
+ out = self.ls(self._parent(path), detail=True, **kwargs)
690
+ out = [o for o in out if o["name"].rstrip("/") == path]
691
+ if out:
692
+ return out[0]
693
+ out = self.ls(path, detail=True, **kwargs)
694
+ path = path.rstrip("/")
695
+ out1 = [o for o in out if o["name"].rstrip("/") == path]
696
+ if len(out1) == 1:
697
+ if "size" not in out1[0]:
698
+ out1[0]["size"] = None
699
+ return out1[0]
700
+ elif len(out1) > 1 or out:
701
+ return {"name": path, "size": 0, "type": "directory"}
702
+ else:
703
+ raise FileNotFoundError(path)
704
+
705
+ def checksum(self, path):
706
+ """Unique value for current version of file
707
+
708
+ If the checksum is the same from one moment to another, the contents
709
+ are guaranteed to be the same. If the checksum changes, the contents
710
+ *might* have changed.
711
+
712
+ This should normally be overridden; default will probably capture
713
+ creation/modification timestamp (which would be good) or maybe
714
+ access timestamp (which would be bad)
715
+ """
716
+ return int(tokenize(self.info(path)), 16)
717
+
718
+ def size(self, path):
719
+ """Size in bytes of file"""
720
+ return self.info(path).get("size", None)
721
+
722
+ def sizes(self, paths):
723
+ """Size in bytes of each file in a list of paths"""
724
+ return [self.size(p) for p in paths]
725
+
726
+ def isdir(self, path):
727
+ """Is this entry directory-like?"""
728
+ try:
729
+ return self.info(path)["type"] == "directory"
730
+ except OSError:
731
+ return False
732
+
733
+ def isfile(self, path):
734
+ """Is this entry file-like?"""
735
+ try:
736
+ return self.info(path)["type"] == "file"
737
+ except: # noqa: E722
738
+ return False
739
+
740
+ def read_text(self, path, encoding=None, errors=None, newline=None, **kwargs):
741
+ """Get the contents of the file as a string.
742
+
743
+ Parameters
744
+ ----------
745
+ path: str
746
+ URL of file on this filesystem
747
+ encoding, errors, newline: same as `open`.
748
+ """
749
+ with self.open(
750
+ path,
751
+ mode="r",
752
+ encoding=encoding,
753
+ errors=errors,
754
+ newline=newline,
755
+ **kwargs,
756
+ ) as f:
757
+ return f.read()
758
+
759
+ def write_text(
760
+ self, path, value, encoding=None, errors=None, newline=None, **kwargs
761
+ ):
762
+ """Write the text to the given file.
763
+
764
+ An existing file will be overwritten.
765
+
766
+ Parameters
767
+ ----------
768
+ path: str
769
+ URL of file on this filesystem
770
+ value: str
771
+ Text to write.
772
+ encoding, errors, newline: same as `open`.
773
+ """
774
+ with self.open(
775
+ path,
776
+ mode="w",
777
+ encoding=encoding,
778
+ errors=errors,
779
+ newline=newline,
780
+ **kwargs,
781
+ ) as f:
782
+ return f.write(value)
783
+
784
+ def cat_file(self, path, start=None, end=None, **kwargs):
785
+ """Get the content of a file
786
+
787
+ Parameters
788
+ ----------
789
+ path: URL of file on this filesystem
790
+ start, end: int
791
+ Bytes limits of the read. If negative, backwards from end,
792
+ like usual python slices. Either can be None for start or
793
+ end of file, respectively
794
+ kwargs: passed to ``open()``.
795
+ """
796
+ # explicitly set buffering off?
797
+ with self.open(path, "rb", **kwargs) as f:
798
+ if start is not None:
799
+ if start >= 0:
800
+ f.seek(start)
801
+ else:
802
+ f.seek(max(0, f.size + start))
803
+ if end is not None:
804
+ if end < 0:
805
+ end = f.size + end
806
+ return f.read(end - f.tell())
807
+ return f.read()
808
+
809
+ def pipe_file(self, path, value, mode="overwrite", **kwargs):
810
+ """Set the bytes of given file"""
811
+ if mode == "create" and self.exists(path):
812
+ # non-atomic but simple way; or could use "xb" in open(), which is likely
813
+ # not as well supported
814
+ raise FileExistsError
815
+ with self.open(path, "wb", **kwargs) as f:
816
+ f.write(value)
817
+
818
+ def pipe(self, path, value=None, **kwargs):
819
+ """Put value into path
820
+
821
+ (counterpart to ``cat``)
822
+
823
+ Parameters
824
+ ----------
825
+ path: string or dict(str, bytes)
826
+ If a string, a single remote location to put ``value`` bytes; if a dict,
827
+ a mapping of {path: bytesvalue}.
828
+ value: bytes, optional
829
+ If using a single path, these are the bytes to put there. Ignored if
830
+ ``path`` is a dict
831
+ """
832
+ if isinstance(path, str):
833
+ self.pipe_file(self._strip_protocol(path), value, **kwargs)
834
+ elif isinstance(path, dict):
835
+ for k, v in path.items():
836
+ self.pipe_file(self._strip_protocol(k), v, **kwargs)
837
+ else:
838
+ raise ValueError("path must be str or dict")
839
+
840
+ def cat_ranges(
841
+ self, paths, starts, ends, max_gap=None, on_error="return", **kwargs
842
+ ):
843
+ """Get the contents of byte ranges from one or more files
844
+
845
+ Parameters
846
+ ----------
847
+ paths: list
848
+ A list of filepaths on this filesystem
849
+ starts, ends: int or list
850
+ Bytes limits of the read. If using a single int, the same value will be
851
+ used to read all the specified files.
852
+ """
853
+ if max_gap is not None:
854
+ raise NotImplementedError
855
+ if not isinstance(paths, list):
856
+ raise TypeError
857
+ if not isinstance(starts, list):
858
+ starts = [starts] * len(paths)
859
+ if not isinstance(ends, list):
860
+ ends = [ends] * len(paths)
861
+ if len(starts) != len(paths) or len(ends) != len(paths):
862
+ raise ValueError
863
+ out = []
864
+ for p, s, e in zip(paths, starts, ends):
865
+ try:
866
+ out.append(self.cat_file(p, s, e))
867
+ except Exception as e:
868
+ if on_error == "return":
869
+ out.append(e)
870
+ else:
871
+ raise
872
+ return out
873
+
874
+ def cat(self, path, recursive=False, on_error="raise", **kwargs):
875
+ """Fetch (potentially multiple) paths' contents
876
+
877
+ Parameters
878
+ ----------
879
+ recursive: bool
880
+ If True, assume the path(s) are directories, and get all the
881
+ contained files
882
+ on_error : "raise", "omit", "return"
883
+ If raise, an underlying exception will be raised (converted to KeyError
884
+ if the type is in self.missing_exceptions); if omit, keys with exception
885
+ will simply not be included in the output; if "return", all keys are
886
+ included in the output, but the value will be bytes or an exception
887
+ instance.
888
+ kwargs: passed to cat_file
889
+
890
+ Returns
891
+ -------
892
+ dict of {path: contents} if there are multiple paths
893
+ or the path has been otherwise expanded
894
+ """
895
+ paths = self.expand_path(path, recursive=recursive)
896
+ if (
897
+ len(paths) > 1
898
+ or isinstance(path, list)
899
+ or paths[0] != self._strip_protocol(path)
900
+ ):
901
+ out = {}
902
+ for path in paths:
903
+ try:
904
+ out[path] = self.cat_file(path, **kwargs)
905
+ except Exception as e:
906
+ if on_error == "raise":
907
+ raise
908
+ if on_error == "return":
909
+ out[path] = e
910
+ return out
911
+ else:
912
+ return self.cat_file(paths[0], **kwargs)
913
+
914
+ def get_file(self, rpath, lpath, callback=DEFAULT_CALLBACK, outfile=None, **kwargs):
915
+ """Copy single remote file to local"""
916
+ from .implementations.local import LocalFileSystem
917
+
918
+ if isfilelike(lpath):
919
+ outfile = lpath
920
+ elif self.isdir(rpath):
921
+ os.makedirs(lpath, exist_ok=True)
922
+ return None
923
+
924
+ fs = LocalFileSystem(auto_mkdir=True)
925
+ fs.makedirs(fs._parent(lpath), exist_ok=True)
926
+
927
+ with self.open(rpath, "rb", **kwargs) as f1:
928
+ if outfile is None:
929
+ outfile = open(lpath, "wb")
930
+
931
+ try:
932
+ callback.set_size(getattr(f1, "size", None))
933
+ data = True
934
+ while data:
935
+ data = f1.read(self.blocksize)
936
+ segment_len = outfile.write(data)
937
+ if segment_len is None:
938
+ segment_len = len(data)
939
+ callback.relative_update(segment_len)
940
+ finally:
941
+ if not isfilelike(lpath):
942
+ outfile.close()
943
+
944
+ def get(
945
+ self,
946
+ rpath,
947
+ lpath,
948
+ recursive=False,
949
+ callback=DEFAULT_CALLBACK,
950
+ maxdepth=None,
951
+ **kwargs,
952
+ ):
953
+ """Copy file(s) to local.
954
+
955
+ Copies a specific file or tree of files (if recursive=True). If lpath
956
+ ends with a "/", it will be assumed to be a directory, and target files
957
+ will go within. Can submit a list of paths, which may be glob-patterns
958
+ and will be expanded.
959
+
960
+ Calls get_file for each source.
961
+ """
962
+ if isinstance(lpath, list) and isinstance(rpath, list):
963
+ # No need to expand paths when both source and destination
964
+ # are provided as lists
965
+ rpaths = rpath
966
+ lpaths = lpath
967
+ else:
968
+ from .implementations.local import (
969
+ LocalFileSystem,
970
+ make_path_posix,
971
+ trailing_sep,
972
+ )
973
+
974
+ source_is_str = isinstance(rpath, str)
975
+ rpaths = self.expand_path(rpath, recursive=recursive, maxdepth=maxdepth)
976
+ if source_is_str and (not recursive or maxdepth is not None):
977
+ # Non-recursive glob does not copy directories
978
+ rpaths = [p for p in rpaths if not (trailing_sep(p) or self.isdir(p))]
979
+ if not rpaths:
980
+ return
981
+
982
+ if isinstance(lpath, str):
983
+ lpath = make_path_posix(lpath)
984
+
985
+ source_is_file = len(rpaths) == 1
986
+ dest_is_dir = isinstance(lpath, str) and (
987
+ trailing_sep(lpath) or LocalFileSystem().isdir(lpath)
988
+ )
989
+
990
+ exists = source_is_str and (
991
+ (has_magic(rpath) and source_is_file)
992
+ or (not has_magic(rpath) and dest_is_dir and not trailing_sep(rpath))
993
+ )
994
+ lpaths = other_paths(
995
+ rpaths,
996
+ lpath,
997
+ exists=exists,
998
+ flatten=not source_is_str,
999
+ )
1000
+
1001
+ callback.set_size(len(lpaths))
1002
+ for lpath, rpath in callback.wrap(zip(lpaths, rpaths)):
1003
+ with callback.branched(rpath, lpath) as child:
1004
+ self.get_file(rpath, lpath, callback=child, **kwargs)
1005
+
1006
+ def put_file(
1007
+ self, lpath, rpath, callback=DEFAULT_CALLBACK, mode="overwrite", **kwargs
1008
+ ):
1009
+ """Copy single file to remote"""
1010
+ if mode == "create" and self.exists(rpath):
1011
+ raise FileExistsError
1012
+ if os.path.isdir(lpath):
1013
+ self.makedirs(rpath, exist_ok=True)
1014
+ return None
1015
+
1016
+ with open(lpath, "rb") as f1:
1017
+ size = f1.seek(0, 2)
1018
+ callback.set_size(size)
1019
+ f1.seek(0)
1020
+
1021
+ self.mkdirs(self._parent(os.fspath(rpath)), exist_ok=True)
1022
+ with self.open(rpath, "wb", **kwargs) as f2:
1023
+ while f1.tell() < size:
1024
+ data = f1.read(self.blocksize)
1025
+ segment_len = f2.write(data)
1026
+ if segment_len is None:
1027
+ segment_len = len(data)
1028
+ callback.relative_update(segment_len)
1029
+
1030
+ def put(
1031
+ self,
1032
+ lpath,
1033
+ rpath,
1034
+ recursive=False,
1035
+ callback=DEFAULT_CALLBACK,
1036
+ maxdepth=None,
1037
+ **kwargs,
1038
+ ):
1039
+ """Copy file(s) from local.
1040
+
1041
+ Copies a specific file or tree of files (if recursive=True). If rpath
1042
+ ends with a "/", it will be assumed to be a directory, and target files
1043
+ will go within.
1044
+
1045
+ Calls put_file for each source.
1046
+ """
1047
+ if isinstance(lpath, list) and isinstance(rpath, list):
1048
+ # No need to expand paths when both source and destination
1049
+ # are provided as lists
1050
+ rpaths = rpath
1051
+ lpaths = lpath
1052
+ else:
1053
+ from .implementations.local import (
1054
+ LocalFileSystem,
1055
+ make_path_posix,
1056
+ trailing_sep,
1057
+ )
1058
+
1059
+ source_is_str = isinstance(lpath, str)
1060
+ if source_is_str:
1061
+ lpath = make_path_posix(lpath)
1062
+ fs = LocalFileSystem()
1063
+ lpaths = fs.expand_path(lpath, recursive=recursive, maxdepth=maxdepth)
1064
+ if source_is_str and (not recursive or maxdepth is not None):
1065
+ # Non-recursive glob does not copy directories
1066
+ lpaths = [p for p in lpaths if not (trailing_sep(p) or fs.isdir(p))]
1067
+ if not lpaths:
1068
+ return
1069
+
1070
+ source_is_file = len(lpaths) == 1
1071
+ dest_is_dir = isinstance(rpath, str) and (
1072
+ trailing_sep(rpath) or self.isdir(rpath)
1073
+ )
1074
+
1075
+ rpath = (
1076
+ self._strip_protocol(rpath)
1077
+ if isinstance(rpath, str)
1078
+ else [self._strip_protocol(p) for p in rpath]
1079
+ )
1080
+ exists = source_is_str and (
1081
+ (has_magic(lpath) and source_is_file)
1082
+ or (not has_magic(lpath) and dest_is_dir and not trailing_sep(lpath))
1083
+ )
1084
+ rpaths = other_paths(
1085
+ lpaths,
1086
+ rpath,
1087
+ exists=exists,
1088
+ flatten=not source_is_str,
1089
+ )
1090
+
1091
+ callback.set_size(len(rpaths))
1092
+ for lpath, rpath in callback.wrap(zip(lpaths, rpaths)):
1093
+ with callback.branched(lpath, rpath) as child:
1094
+ self.put_file(lpath, rpath, callback=child, **kwargs)
1095
+
1096
+ def head(self, path, size=1024):
1097
+ """Get the first ``size`` bytes from file"""
1098
+ with self.open(path, "rb") as f:
1099
+ return f.read(size)
1100
+
1101
+ def tail(self, path, size=1024):
1102
+ """Get the last ``size`` bytes from file"""
1103
+ with self.open(path, "rb") as f:
1104
+ f.seek(max(-size, -f.size), 2)
1105
+ return f.read()
1106
+
1107
+ def cp_file(self, path1, path2, **kwargs):
1108
+ raise NotImplementedError
1109
+
1110
+ def copy(
1111
+ self, path1, path2, recursive=False, maxdepth=None, on_error=None, **kwargs
1112
+ ):
1113
+ """Copy within two locations in the filesystem
1114
+
1115
+ on_error : "raise", "ignore"
1116
+ If raise, any not-found exceptions will be raised; if ignore any
1117
+ not-found exceptions will cause the path to be skipped; defaults to
1118
+ raise unless recursive is true, where the default is ignore
1119
+ """
1120
+ if on_error is None and recursive:
1121
+ on_error = "ignore"
1122
+ elif on_error is None:
1123
+ on_error = "raise"
1124
+
1125
+ if isinstance(path1, list) and isinstance(path2, list):
1126
+ # No need to expand paths when both source and destination
1127
+ # are provided as lists
1128
+ paths1 = path1
1129
+ paths2 = path2
1130
+ else:
1131
+ from .implementations.local import trailing_sep
1132
+
1133
+ source_is_str = isinstance(path1, str)
1134
+ paths1 = self.expand_path(path1, recursive=recursive, maxdepth=maxdepth)
1135
+ if source_is_str and (not recursive or maxdepth is not None):
1136
+ # Non-recursive glob does not copy directories
1137
+ paths1 = [p for p in paths1 if not (trailing_sep(p) or self.isdir(p))]
1138
+ if not paths1:
1139
+ return
1140
+
1141
+ source_is_file = len(paths1) == 1
1142
+ dest_is_dir = isinstance(path2, str) and (
1143
+ trailing_sep(path2) or self.isdir(path2)
1144
+ )
1145
+
1146
+ exists = source_is_str and (
1147
+ (has_magic(path1) and source_is_file)
1148
+ or (not has_magic(path1) and dest_is_dir and not trailing_sep(path1))
1149
+ )
1150
+ paths2 = other_paths(
1151
+ paths1,
1152
+ path2,
1153
+ exists=exists,
1154
+ flatten=not source_is_str,
1155
+ )
1156
+
1157
+ for p1, p2 in zip(paths1, paths2):
1158
+ try:
1159
+ self.cp_file(p1, p2, **kwargs)
1160
+ except FileNotFoundError:
1161
+ if on_error == "raise":
1162
+ raise
1163
+
1164
+ def expand_path(self, path, recursive=False, maxdepth=None, **kwargs):
1165
+ """Turn one or more globs or directories into a list of all matching paths
1166
+ to files or directories.
1167
+
1168
+ kwargs are passed to ``glob`` or ``find``, which may in turn call ``ls``
1169
+ """
1170
+
1171
+ if maxdepth is not None and maxdepth < 1:
1172
+ raise ValueError("maxdepth must be at least 1")
1173
+
1174
+ if isinstance(path, (str, os.PathLike)):
1175
+ out = self.expand_path([path], recursive, maxdepth)
1176
+ else:
1177
+ out = set()
1178
+ path = [self._strip_protocol(p) for p in path]
1179
+ for p in path:
1180
+ if has_magic(p):
1181
+ bit = set(self.glob(p, maxdepth=maxdepth, **kwargs))
1182
+ out |= bit
1183
+ if recursive:
1184
+ # glob call above expanded one depth so if maxdepth is defined
1185
+ # then decrement it in expand_path call below. If it is zero
1186
+ # after decrementing then avoid expand_path call.
1187
+ if maxdepth is not None and maxdepth <= 1:
1188
+ continue
1189
+ out |= set(
1190
+ self.expand_path(
1191
+ list(bit),
1192
+ recursive=recursive,
1193
+ maxdepth=maxdepth - 1 if maxdepth is not None else None,
1194
+ **kwargs,
1195
+ )
1196
+ )
1197
+ continue
1198
+ elif recursive:
1199
+ rec = set(
1200
+ self.find(
1201
+ p, maxdepth=maxdepth, withdirs=True, detail=False, **kwargs
1202
+ )
1203
+ )
1204
+ out |= rec
1205
+ if p not in out and (recursive is False or self.exists(p)):
1206
+ # should only check once, for the root
1207
+ out.add(p)
1208
+ if not out:
1209
+ raise FileNotFoundError(path)
1210
+ return sorted(out)
1211
+
1212
+ def mv(self, path1, path2, recursive=False, maxdepth=None, **kwargs):
1213
+ """Move file(s) from one location to another"""
1214
+ if path1 == path2:
1215
+ logger.debug("%s mv: The paths are the same, so no files were moved.", self)
1216
+ else:
1217
+ # explicitly raise exception to prevent data corruption
1218
+ self.copy(
1219
+ path1, path2, recursive=recursive, maxdepth=maxdepth, on_error="raise"
1220
+ )
1221
+ self.rm(path1, recursive=recursive)
1222
+
1223
+ def rm_file(self, path):
1224
+ """Delete a file"""
1225
+ self._rm(path)
1226
+
1227
+ def _rm(self, path):
1228
+ """Delete one file"""
1229
+ # this is the old name for the method, prefer rm_file
1230
+ raise NotImplementedError
1231
+
1232
+ def rm(self, path, recursive=False, maxdepth=None):
1233
+ """Delete files.
1234
+
1235
+ Parameters
1236
+ ----------
1237
+ path: str or list of str
1238
+ File(s) to delete.
1239
+ recursive: bool
1240
+ If file(s) are directories, recursively delete contents and then
1241
+ also remove the directory
1242
+ maxdepth: int or None
1243
+ Depth to pass to walk for finding files to delete, if recursive.
1244
+ If None, there will be no limit and infinite recursion may be
1245
+ possible.
1246
+ """
1247
+ path = self.expand_path(path, recursive=recursive, maxdepth=maxdepth)
1248
+ for p in reversed(path):
1249
+ self.rm_file(p)
1250
+
1251
+ @classmethod
1252
+ def _parent(cls, path):
1253
+ path = cls._strip_protocol(path)
1254
+ if "/" in path:
1255
+ parent = path.rsplit("/", 1)[0].lstrip(cls.root_marker)
1256
+ return cls.root_marker + parent
1257
+ else:
1258
+ return cls.root_marker
1259
+
1260
+ def _open(
1261
+ self,
1262
+ path,
1263
+ mode="rb",
1264
+ block_size=None,
1265
+ autocommit=True,
1266
+ cache_options=None,
1267
+ **kwargs,
1268
+ ):
1269
+ """Return raw bytes-mode file-like from the file-system"""
1270
+ return AbstractBufferedFile(
1271
+ self,
1272
+ path,
1273
+ mode,
1274
+ block_size,
1275
+ autocommit,
1276
+ cache_options=cache_options,
1277
+ **kwargs,
1278
+ )
1279
+
1280
+ def open(
1281
+ self,
1282
+ path,
1283
+ mode="rb",
1284
+ block_size=None,
1285
+ cache_options=None,
1286
+ compression=None,
1287
+ **kwargs,
1288
+ ):
1289
+ """
1290
+ Return a file-like object from the filesystem
1291
+
1292
+ The resultant instance must function correctly in a context ``with``
1293
+ block.
1294
+
1295
+ Parameters
1296
+ ----------
1297
+ path: str
1298
+ Target file
1299
+ mode: str like 'rb', 'w'
1300
+ See builtin ``open()``
1301
+ Mode "x" (exclusive write) may be implemented by the backend. Even if
1302
+ it is, whether it is checked up front or on commit, and whether it is
1303
+ atomic is implementation-dependent.
1304
+ block_size: int
1305
+ Some indication of buffering - this is a value in bytes
1306
+ cache_options : dict, optional
1307
+ Extra arguments to pass through to the cache.
1308
+ compression: string or None
1309
+ If given, open file using compression codec. Can either be a compression
1310
+ name (a key in ``fsspec.compression.compr``) or "infer" to guess the
1311
+ compression from the filename suffix.
1312
+ encoding, errors, newline: passed on to TextIOWrapper for text mode
1313
+ """
1314
+ import io
1315
+
1316
+ path = self._strip_protocol(path)
1317
+ if "b" not in mode:
1318
+ mode = mode.replace("t", "") + "b"
1319
+
1320
+ text_kwargs = {
1321
+ k: kwargs.pop(k)
1322
+ for k in ["encoding", "errors", "newline"]
1323
+ if k in kwargs
1324
+ }
1325
+ return io.TextIOWrapper(
1326
+ self.open(
1327
+ path,
1328
+ mode,
1329
+ block_size=block_size,
1330
+ cache_options=cache_options,
1331
+ compression=compression,
1332
+ **kwargs,
1333
+ ),
1334
+ **text_kwargs,
1335
+ )
1336
+ else:
1337
+ ac = kwargs.pop("autocommit", not self._intrans)
1338
+ f = self._open(
1339
+ path,
1340
+ mode=mode,
1341
+ block_size=block_size,
1342
+ autocommit=ac,
1343
+ cache_options=cache_options,
1344
+ **kwargs,
1345
+ )
1346
+ if compression is not None:
1347
+ from fsspec.compression import compr
1348
+ from fsspec.core import get_compression
1349
+
1350
+ compression = get_compression(path, compression)
1351
+ compress = compr[compression]
1352
+ f = compress(f, mode=mode[0])
1353
+
1354
+ if not ac and "r" not in mode:
1355
+ self.transaction.files.append(f)
1356
+ return f
1357
+
1358
+ def touch(self, path, truncate=True, **kwargs):
1359
+ """Create empty file, or update timestamp
1360
+
1361
+ Parameters
1362
+ ----------
1363
+ path: str
1364
+ file location
1365
+ truncate: bool
1366
+ If True, always set file size to 0; if False, update timestamp and
1367
+ leave file unchanged, if backend allows this
1368
+ """
1369
+ if truncate or not self.exists(path):
1370
+ with self.open(path, "wb", **kwargs):
1371
+ pass
1372
+ else:
1373
+ raise NotImplementedError # update timestamp, if possible
1374
+
1375
+ def ukey(self, path):
1376
+ """Hash of file properties, to tell if it has changed"""
1377
+ return sha256(str(self.info(path)).encode()).hexdigest()
1378
+
1379
+ def read_block(self, fn, offset, length, delimiter=None):
1380
+ """Read a block of bytes from
1381
+
1382
+ Starting at ``offset`` of the file, read ``length`` bytes. If
1383
+ ``delimiter`` is set then we ensure that the read starts and stops at
1384
+ delimiter boundaries that follow the locations ``offset`` and ``offset
1385
+ + length``. If ``offset`` is zero then we start at zero. The
1386
+ bytestring returned WILL include the end delimiter string.
1387
+
1388
+ If offset+length is beyond the eof, reads to eof.
1389
+
1390
+ Parameters
1391
+ ----------
1392
+ fn: string
1393
+ Path to filename
1394
+ offset: int
1395
+ Byte offset to start read
1396
+ length: int
1397
+ Number of bytes to read. If None, read to end.
1398
+ delimiter: bytes (optional)
1399
+ Ensure reading starts and stops at delimiter bytestring
1400
+
1401
+ Examples
1402
+ --------
1403
+ >>> fs.read_block('data/file.csv', 0, 13) # doctest: +SKIP
1404
+ b'Alice, 100\\nBo'
1405
+ >>> fs.read_block('data/file.csv', 0, 13, delimiter=b'\\n') # doctest: +SKIP
1406
+ b'Alice, 100\\nBob, 200\\n'
1407
+
1408
+ Use ``length=None`` to read to the end of the file.
1409
+ >>> fs.read_block('data/file.csv', 0, None, delimiter=b'\\n') # doctest: +SKIP
1410
+ b'Alice, 100\\nBob, 200\\nCharlie, 300'
1411
+
1412
+ See Also
1413
+ --------
1414
+ :func:`fsspec.utils.read_block`
1415
+ """
1416
+ with self.open(fn, "rb") as f:
1417
+ size = f.size
1418
+ if length is None:
1419
+ length = size
1420
+ if size is not None and offset + length > size:
1421
+ length = size - offset
1422
+ return read_block(f, offset, length, delimiter)
1423
+
1424
+ def to_json(self, *, include_password: bool = True) -> str:
1425
+ """
1426
+ JSON representation of this filesystem instance.
1427
+
1428
+ Parameters
1429
+ ----------
1430
+ include_password: bool, default True
1431
+ Whether to include the password (if any) in the output.
1432
+
1433
+ Returns
1434
+ -------
1435
+ JSON string with keys ``cls`` (the python location of this class),
1436
+ protocol (text name of this class's protocol, first one in case of
1437
+ multiple), ``args`` (positional args, usually empty), and all other
1438
+ keyword arguments as their own keys.
1439
+
1440
+ Warnings
1441
+ --------
1442
+ Serialized filesystems may contain sensitive information that has been
1443
+ passed to the constructor, such as passwords and tokens. Make sure you
1444
+ store and send them in a secure environment!
1445
+ """
1446
+ from .json import FilesystemJSONEncoder
1447
+
1448
+ return json.dumps(
1449
+ self,
1450
+ cls=type(
1451
+ "_FilesystemJSONEncoder",
1452
+ (FilesystemJSONEncoder,),
1453
+ {"include_password": include_password},
1454
+ ),
1455
+ )
1456
+
1457
+ @staticmethod
1458
+ def from_json(blob: str) -> AbstractFileSystem:
1459
+ """
1460
+ Recreate a filesystem instance from JSON representation.
1461
+
1462
+ See ``.to_json()`` for the expected structure of the input.
1463
+
1464
+ Parameters
1465
+ ----------
1466
+ blob: str
1467
+
1468
+ Returns
1469
+ -------
1470
+ file system instance, not necessarily of this particular class.
1471
+
1472
+ Warnings
1473
+ --------
1474
+ This can import arbitrary modules (as determined by the ``cls`` key).
1475
+ Make sure you haven't installed any modules that may execute malicious code
1476
+ at import time.
1477
+ """
1478
+ from .json import FilesystemJSONDecoder
1479
+
1480
+ return json.loads(blob, cls=FilesystemJSONDecoder)
1481
+
1482
+ def to_dict(self, *, include_password: bool = True) -> dict[str, Any]:
1483
+ """
1484
+ JSON-serializable dictionary representation of this filesystem instance.
1485
+
1486
+ Parameters
1487
+ ----------
1488
+ include_password: bool, default True
1489
+ Whether to include the password (if any) in the output.
1490
+
1491
+ Returns
1492
+ -------
1493
+ Dictionary with keys ``cls`` (the python location of this class),
1494
+ protocol (text name of this class's protocol, first one in case of
1495
+ multiple), ``args`` (positional args, usually empty), and all other
1496
+ keyword arguments as their own keys.
1497
+
1498
+ Warnings
1499
+ --------
1500
+ Serialized filesystems may contain sensitive information that has been
1501
+ passed to the constructor, such as passwords and tokens. Make sure you
1502
+ store and send them in a secure environment!
1503
+ """
1504
+ from .json import FilesystemJSONEncoder
1505
+
1506
+ json_encoder = FilesystemJSONEncoder()
1507
+
1508
+ cls = type(self)
1509
+ proto = self.protocol
1510
+
1511
+ storage_options = dict(self.storage_options)
1512
+ if not include_password:
1513
+ storage_options.pop("password", None)
1514
+
1515
+ return dict(
1516
+ cls=f"{cls.__module__}:{cls.__name__}",
1517
+ protocol=proto[0] if isinstance(proto, (tuple, list)) else proto,
1518
+ args=json_encoder.make_serializable(self.storage_args),
1519
+ **json_encoder.make_serializable(storage_options),
1520
+ )
1521
+
1522
+ @staticmethod
1523
+ def from_dict(dct: dict[str, Any]) -> AbstractFileSystem:
1524
+ """
1525
+ Recreate a filesystem instance from dictionary representation.
1526
+
1527
+ See ``.to_dict()`` for the expected structure of the input.
1528
+
1529
+ Parameters
1530
+ ----------
1531
+ dct: Dict[str, Any]
1532
+
1533
+ Returns
1534
+ -------
1535
+ file system instance, not necessarily of this particular class.
1536
+
1537
+ Warnings
1538
+ --------
1539
+ This can import arbitrary modules (as determined by the ``cls`` key).
1540
+ Make sure you haven't installed any modules that may execute malicious code
1541
+ at import time.
1542
+ """
1543
+ from .json import FilesystemJSONDecoder
1544
+
1545
+ json_decoder = FilesystemJSONDecoder()
1546
+
1547
+ dct = dict(dct) # Defensive copy
1548
+
1549
+ cls = FilesystemJSONDecoder.try_resolve_fs_cls(dct)
1550
+ if cls is None:
1551
+ raise ValueError("Not a serialized AbstractFileSystem")
1552
+
1553
+ dct.pop("cls", None)
1554
+ dct.pop("protocol", None)
1555
+
1556
+ return cls(
1557
+ *json_decoder.unmake_serializable(dct.pop("args", ())),
1558
+ **json_decoder.unmake_serializable(dct),
1559
+ )
1560
+
1561
+ def _get_pyarrow_filesystem(self):
1562
+ """
1563
+ Make a version of the FS instance which will be acceptable to pyarrow
1564
+ """
1565
+ # all instances already also derive from pyarrow
1566
+ return self
1567
+
1568
+ def get_mapper(self, root="", check=False, create=False, missing_exceptions=None):
1569
+ """Create key/value store based on this file-system
1570
+
1571
+ Makes a MutableMapping interface to the FS at the given root path.
1572
+ See ``fsspec.mapping.FSMap`` for further details.
1573
+ """
1574
+ from .mapping import FSMap
1575
+
1576
+ return FSMap(
1577
+ root,
1578
+ self,
1579
+ check=check,
1580
+ create=create,
1581
+ missing_exceptions=missing_exceptions,
1582
+ )
1583
+
1584
+ @classmethod
1585
+ def clear_instance_cache(cls):
1586
+ """
1587
+ Clear the cache of filesystem instances.
1588
+
1589
+ Notes
1590
+ -----
1591
+ Unless overridden by setting the ``cachable`` class attribute to False,
1592
+ the filesystem class stores a reference to newly created instances. This
1593
+ prevents Python's normal rules around garbage collection from working,
1594
+ since the instance's refcount will not drop to zero until
1595
+ ``clear_instance_cache`` is called.
1596
+ """
1597
+ cls._cache.clear()
1598
+
1599
+ def created(self, path):
1600
+ """Return the created timestamp of a file as a datetime.datetime"""
1601
+ raise NotImplementedError
1602
+
1603
+ def modified(self, path):
1604
+ """Return the modified timestamp of a file as a datetime.datetime"""
1605
+ raise NotImplementedError
1606
+
1607
+ def tree(
1608
+ self,
1609
+ path: str = "/",
1610
+ recursion_limit: int = 2,
1611
+ max_display: int = 25,
1612
+ display_size: bool = False,
1613
+ prefix: str = "",
1614
+ is_last: bool = True,
1615
+ first: bool = True,
1616
+ indent_size: int = 4,
1617
+ ) -> str:
1618
+ """
1619
+ Return a tree-like structure of the filesystem starting from the given path as a string.
1620
+
1621
+ Parameters
1622
+ ----------
1623
+ path: Root path to start traversal from
1624
+ recursion_limit: Maximum depth of directory traversal
1625
+ max_display: Maximum number of items to display per directory
1626
+ display_size: Whether to display file sizes
1627
+ prefix: Current line prefix for visual tree structure
1628
+ is_last: Whether current item is last in its level
1629
+ first: Whether this is the first call (displays root path)
1630
+ indent_size: Number of spaces per indent level
1631
+
1632
+ Returns
1633
+ -------
1634
+ str: A string representing the tree structure.
1635
+
1636
+ Example
1637
+ -------
1638
+ >>> from fsspec import filesystem
1639
+
1640
+ >>> fs = filesystem('ftp', host='test.rebex.net', user='demo', password='password')
1641
+ >>> tree = fs.tree(display_size=True, recursion_limit=3, indent_size=8, max_display=10)
1642
+ >>> print(tree)
1643
+ """
1644
+
1645
+ def format_bytes(n: int) -> str:
1646
+ """Format bytes as text."""
1647
+ for prefix, k in (
1648
+ ("P", 2**50),
1649
+ ("T", 2**40),
1650
+ ("G", 2**30),
1651
+ ("M", 2**20),
1652
+ ("k", 2**10),
1653
+ ):
1654
+ if n >= 0.9 * k:
1655
+ return f"{n / k:.2f} {prefix}b"
1656
+ return f"{n}B"
1657
+
1658
+ result = []
1659
+
1660
+ if first:
1661
+ result.append(path)
1662
+
1663
+ if recursion_limit:
1664
+ indent = " " * indent_size
1665
+ contents = self.ls(path, detail=True)
1666
+ contents.sort(
1667
+ key=lambda x: (x.get("type") != "directory", x.get("name", ""))
1668
+ )
1669
+
1670
+ if max_display is not None and len(contents) > max_display:
1671
+ displayed_contents = contents[:max_display]
1672
+ remaining_count = len(contents) - max_display
1673
+ else:
1674
+ displayed_contents = contents
1675
+ remaining_count = 0
1676
+
1677
+ for i, item in enumerate(displayed_contents):
1678
+ is_last_item = (i == len(displayed_contents) - 1) and (
1679
+ remaining_count == 0
1680
+ )
1681
+
1682
+ branch = (
1683
+ "└" + ("─" * (indent_size - 2))
1684
+ if is_last_item
1685
+ else "├" + ("─" * (indent_size - 2))
1686
+ )
1687
+ branch += " "
1688
+ new_prefix = prefix + (
1689
+ indent if is_last_item else "│" + " " * (indent_size - 1)
1690
+ )
1691
+
1692
+ name = os.path.basename(item.get("name", ""))
1693
+
1694
+ if display_size and item.get("type") == "directory":
1695
+ sub_contents = self.ls(item.get("name", ""), detail=True)
1696
+ num_files = sum(
1697
+ 1 for sub_item in sub_contents if sub_item.get("type") == "file"
1698
+ )
1699
+ num_folders = sum(
1700
+ 1
1701
+ for sub_item in sub_contents
1702
+ if sub_item.get("type") == "directory"
1703
+ )
1704
+
1705
+ if num_files == 0 and num_folders == 0:
1706
+ size = " (empty folder)"
1707
+ elif num_files == 0:
1708
+ size = f" ({num_folders} subfolder{'s' if num_folders > 1 else ''})"
1709
+ elif num_folders == 0:
1710
+ size = f" ({num_files} file{'s' if num_files > 1 else ''})"
1711
+ else:
1712
+ size = f" ({num_files} file{'s' if num_files > 1 else ''}, {num_folders} subfolder{'s' if num_folders > 1 else ''})"
1713
+ elif display_size and item.get("type") == "file":
1714
+ size = f" ({format_bytes(item.get('size', 0))})"
1715
+ else:
1716
+ size = ""
1717
+
1718
+ result.append(f"{prefix}{branch}{name}{size}")
1719
+
1720
+ if item.get("type") == "directory" and recursion_limit > 0:
1721
+ result.append(
1722
+ self.tree(
1723
+ path=item.get("name", ""),
1724
+ recursion_limit=recursion_limit - 1,
1725
+ max_display=max_display,
1726
+ display_size=display_size,
1727
+ prefix=new_prefix,
1728
+ is_last=is_last_item,
1729
+ first=False,
1730
+ indent_size=indent_size,
1731
+ )
1732
+ )
1733
+
1734
+ if remaining_count > 0:
1735
+ more_message = f"{remaining_count} more item(s) not displayed."
1736
+ result.append(
1737
+ f"{prefix}{'└' + ('─' * (indent_size - 2))} {more_message}"
1738
+ )
1739
+
1740
+ return "\n".join(_ for _ in result if _)
1741
+
1742
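A brief sketch of calling ``tree`` as defined above, using the in-memory backend purely for illustration (not part of the committed file):

import fsspec

fs = fsspec.filesystem("memory")
fs.pipe_file("/demo/x/file.bin", b"\x00" * 2048)

# render two levels of the hierarchy, with sizes, as a printable string
print(fs.tree("/demo", recursion_limit=2, display_size=True))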
+ # ------------------------------------------------------------------------
1743
+ # Aliases
1744
+
1745
+ def read_bytes(self, path, start=None, end=None, **kwargs):
1746
+ """Alias of `AbstractFileSystem.cat_file`."""
1747
+ return self.cat_file(path, start=start, end=end, **kwargs)
1748
+
1749
+ def write_bytes(self, path, value, **kwargs):
1750
+ """Alias of `AbstractFileSystem.pipe_file`."""
1751
+ self.pipe_file(path, value, **kwargs)
1752
+
1753
+ def makedir(self, path, create_parents=True, **kwargs):
1754
+ """Alias of `AbstractFileSystem.mkdir`."""
1755
+ return self.mkdir(path, create_parents=create_parents, **kwargs)
1756
+
1757
+ def mkdirs(self, path, exist_ok=False):
1758
+ """Alias of `AbstractFileSystem.makedirs`."""
1759
+ return self.makedirs(path, exist_ok=exist_ok)
1760
+
1761
+ def listdir(self, path, detail=True, **kwargs):
1762
+ """Alias of `AbstractFileSystem.ls`."""
1763
+ return self.ls(path, detail=detail, **kwargs)
1764
+
1765
+ def cp(self, path1, path2, **kwargs):
1766
+ """Alias of `AbstractFileSystem.copy`."""
1767
+ return self.copy(path1, path2, **kwargs)
1768
+
1769
+ def move(self, path1, path2, **kwargs):
1770
+ """Alias of `AbstractFileSystem.mv`."""
1771
+ return self.mv(path1, path2, **kwargs)
1772
+
1773
+ def stat(self, path, **kwargs):
1774
+ """Alias of `AbstractFileSystem.info`."""
1775
+ return self.info(path, **kwargs)
1776
+
1777
+ def disk_usage(self, path, total=True, maxdepth=None, **kwargs):
1778
+ """Alias of `AbstractFileSystem.du`."""
1779
+ return self.du(path, total=total, maxdepth=maxdepth, **kwargs)
1780
+
1781
+ def rename(self, path1, path2, **kwargs):
1782
+ """Alias of `AbstractFileSystem.mv`."""
1783
+ return self.mv(path1, path2, **kwargs)
1784
+
1785
+ def delete(self, path, recursive=False, maxdepth=None):
1786
+ """Alias of `AbstractFileSystem.rm`."""
1787
+ return self.rm(path, recursive=recursive, maxdepth=maxdepth)
1788
+
1789
+ def upload(self, lpath, rpath, recursive=False, **kwargs):
1790
+ """Alias of `AbstractFileSystem.put`."""
1791
+ return self.put(lpath, rpath, recursive=recursive, **kwargs)
1792
+
1793
+ def download(self, rpath, lpath, recursive=False, **kwargs):
1794
+ """Alias of `AbstractFileSystem.get`."""
1795
+ return self.get(rpath, lpath, recursive=recursive, **kwargs)
1796
+
1797
+ def sign(self, path, expiration=100, **kwargs):
1798
+ """Create a signed URL representing the given path
1799
+
1800
+ Some implementations allow temporary URLs to be generated, as a
1801
+ way of delegating credentials.
1802
+
1803
+ Parameters
1804
+ ----------
1805
+ path : str
1806
+ The path on the filesystem
1807
+ expiration : int
1808
+ Number of seconds to enable the URL for (if supported)
1809
+
1810
+ Returns
1811
+ -------
1812
+ URL : str
1813
+ The signed URL
1814
+
1815
+ Raises
1816
+ ------
1817
+ NotImplementedError : if method is not implemented for a filesystem
1818
+ """
1819
+ raise NotImplementedError("Sign is not implemented for this filesystem")
1820
+
1821
+ def _isfilestore(self):
1822
+ # Originally inherited from pyarrow DaskFileSystem. Keeping this
1823
+ # here for backwards compatibility as long as pyarrow uses its
1824
+ # legacy fsspec-compatible filesystems and thus accepts fsspec
1825
+ # filesystems as well
1826
+ return False
1827
+
1828
+
1829
+ class AbstractBufferedFile(io.IOBase):
1830
+ """Convenient class to derive from to provide buffering
1831
+
1832
+ In the case that the backend does not provide a pythonic file-like object
1833
+ already, this class contains much of the logic to build one. The only
1834
+ methods that need to be overridden are ``_upload_chunk``,
1835
+ ``_initiate_upload`` and ``_fetch_range``.
1836
+ """
1837
+
1838
+ DEFAULT_BLOCK_SIZE = 5 * 2**20
1839
+ _details = None
1840
+
1841
+ def __init__(
1842
+ self,
1843
+ fs,
1844
+ path,
1845
+ mode="rb",
1846
+ block_size="default",
1847
+ autocommit=True,
1848
+ cache_type="readahead",
1849
+ cache_options=None,
1850
+ size=None,
1851
+ **kwargs,
1852
+ ):
1853
+ """
1854
+ Template for files with buffered reading and writing
1855
+
1856
+ Parameters
1857
+ ----------
1858
+ fs: instance of FileSystem
1859
+ path: str
1860
+ location in file-system
1861
+ mode: str
1862
+ Normal file modes. Currently only 'rb', 'wb', 'ab' or 'xb'. Some file
1863
+ systems may be read-only, and some may not support append.
1864
+ block_size: int
1865
+ Buffer size for reading or writing, 'default' for class default
1866
+ autocommit: bool
1867
+ Whether to write to final destination; may only impact what
1868
+ happens when file is being closed.
1869
+ cache_type: {"readahead", "none", "mmap", "bytes"}, default "readahead"
1870
+ Caching policy in read mode. See the definitions in ``core``.
1871
+ cache_options : dict
1872
+ Additional options passed to the constructor for the cache specified
1873
+ by `cache_type`.
1874
+ size: int
1875
+ If given and in read mode, suppresses having to look up the file size
1876
+ kwargs:
1877
+ Gets stored as self.kwargs
1878
+ """
1879
+ from .core import caches
1880
+
1881
+ self.path = path
1882
+ self.fs = fs
1883
+ self.mode = mode
1884
+ self.blocksize = (
1885
+ self.DEFAULT_BLOCK_SIZE if block_size in ["default", None] else block_size
1886
+ )
1887
+ self.loc = 0
1888
+ self.autocommit = autocommit
1889
+ self.end = None
1890
+ self.start = None
1891
+ self.closed = False
1892
+
1893
+ if cache_options is None:
1894
+ cache_options = {}
1895
+
1896
+ if "trim" in kwargs:
1897
+ warnings.warn(
1898
+ "Passing 'trim' to control the cache behavior has been deprecated. "
1899
+ "Specify it within the 'cache_options' argument instead.",
1900
+ FutureWarning,
1901
+ )
1902
+ cache_options["trim"] = kwargs.pop("trim")
1903
+
1904
+ self.kwargs = kwargs
1905
+
1906
+ if mode not in {"ab", "rb", "wb", "xb"}:
1907
+ raise NotImplementedError("File mode not supported")
1908
+ if mode == "rb":
1909
+ if size is not None:
1910
+ self.size = size
1911
+ else:
1912
+ self.size = self.details["size"]
1913
+ self.cache = caches[cache_type](
1914
+ self.blocksize, self._fetch_range, self.size, **cache_options
1915
+ )
1916
+ else:
1917
+ self.buffer = io.BytesIO()
1918
+ self.offset = None
1919
+ self.forced = False
1920
+ self.location = None
1921
+
1922
+ @property
1923
+ def details(self):
1924
+ if self._details is None:
1925
+ self._details = self.fs.info(self.path)
1926
+ return self._details
1927
+
1928
+ @details.setter
1929
+ def details(self, value):
1930
+ self._details = value
1931
+ self.size = value["size"]
1932
+
1933
+ @property
1934
+ def full_name(self):
1935
+ return _unstrip_protocol(self.path, self.fs)
1936
+
1937
+ @property
1938
+ def closed(self):
1939
+ # get around this attr being read-only in IOBase
1940
+ # use getattr here, since this can be called during del
1941
+ return getattr(self, "_closed", True)
1942
+
1943
+ @closed.setter
1944
+ def closed(self, c):
1945
+ self._closed = c
1946
+
1947
+ def __hash__(self):
1948
+ if "w" in self.mode:
1949
+ return id(self)
1950
+ else:
1951
+ return int(tokenize(self.details), 16)
1952
+
1953
+ def __eq__(self, other):
1954
+ """Files are equal if they have the same checksum, only in read mode"""
1955
+ if self is other:
1956
+ return True
1957
+ return (
1958
+ isinstance(other, type(self))
1959
+ and self.mode == "rb"
1960
+ and other.mode == "rb"
1961
+ and hash(self) == hash(other)
1962
+ )
1963
+
1964
+ def commit(self):
1965
+ """Move from temp to final destination"""
1966
+
1967
+ def discard(self):
1968
+ """Throw away temporary file"""
1969
+
1970
+ def info(self):
1971
+ """File information about this path"""
1972
+ if self.readable():
1973
+ return self.details
1974
+ else:
1975
+ raise ValueError("Info not available while writing")
1976
+
1977
+ def tell(self):
1978
+ """Current file location"""
1979
+ return self.loc
1980
+
1981
+ def seek(self, loc, whence=0):
1982
+ """Set current file location
1983
+
1984
+ Parameters
1985
+ ----------
1986
+ loc: int
1987
+ byte location
1988
+ whence: {0, 1, 2}
1989
+ from start of file, current location or end of file, resp.
1990
+ """
1991
+ loc = int(loc)
1992
+ if not self.mode == "rb":
1993
+ raise OSError(ESPIPE, "Seek only available in read mode")
1994
+ if whence == 0:
1995
+ nloc = loc
1996
+ elif whence == 1:
1997
+ nloc = self.loc + loc
1998
+ elif whence == 2:
1999
+ nloc = self.size + loc
2000
+ else:
2001
+ raise ValueError(f"invalid whence ({whence}, should be 0, 1 or 2)")
2002
+ if nloc < 0:
2003
+ raise ValueError("Seek before start of file")
2004
+ self.loc = nloc
2005
+ return self.loc
2006
+
2007
+ def write(self, data):
2008
+ """
2009
+ Write data to buffer.
2010
+
2011
+ Buffer only sent on flush() or if buffer is greater than
2012
+ or equal to blocksize.
2013
+
2014
+ Parameters
2015
+ ----------
2016
+ data: bytes
2017
+ Set of bytes to be written.
2018
+ """
2019
+ if not self.writable():
2020
+ raise ValueError("File not in write mode")
2021
+ if self.closed:
2022
+ raise ValueError("I/O operation on closed file.")
2023
+ if self.forced:
2024
+ raise ValueError("This file has been force-flushed, can only close")
2025
+ out = self.buffer.write(data)
2026
+ self.loc += out
2027
+ if self.buffer.tell() >= self.blocksize:
2028
+ self.flush()
2029
+ return out
2030
+
2031
+ def flush(self, force=False):
2032
+ """
2033
+ Write buffered data to backend store.
2034
+
2035
+ Writes the current buffer, if it is larger than the block-size, or if
2036
+ the file is being closed.
2037
+
2038
+ Parameters
2039
+ ----------
2040
+ force: bool
2041
+ When closing, write the last block even if it is smaller than
2042
+ blocks are allowed to be. Disallows further writing to this file.
2043
+ """
2044
+
2045
+ if self.closed:
2046
+ raise ValueError("Flush on closed file")
2047
+ if force and self.forced:
2048
+ raise ValueError("Force flush cannot be called more than once")
2049
+ if force:
2050
+ self.forced = True
2051
+
2052
+ if self.readable():
2053
+ # no-op to flush on read-mode
2054
+ return
2055
+
2056
+ if not force and self.buffer.tell() < self.blocksize:
2057
+ # Defer write on small block
2058
+ return
2059
+
2060
+ if self.offset is None:
2061
+ # Initialize a multipart upload
2062
+ self.offset = 0
2063
+ try:
2064
+ self._initiate_upload()
2065
+ except:
2066
+ self.closed = True
2067
+ raise
2068
+
2069
+ if self._upload_chunk(final=force) is not False:
2070
+ self.offset += self.buffer.seek(0, 2)
2071
+ self.buffer = io.BytesIO()
2072
+
2073
+ def _upload_chunk(self, final=False):
2074
+ """Write one part of a multi-block file upload
2075
+
2076
+ Parameters
2077
+ ==========
2078
+ final: bool
2079
+ This is the last block, so should complete file, if
2080
+ self.autocommit is True.
2081
+ """
2082
+ # may not yet have been initialized, may need to call _initialize_upload
2083
+
2084
+ def _initiate_upload(self):
2085
+ """Create remote file/upload"""
2086
+ pass
2087
+
2088
+ def _fetch_range(self, start, end):
2089
+ """Get the specified set of bytes from remote"""
2090
+ return self.fs.cat_file(self.path, start=start, end=end)
2091
+
2092
+ def read(self, length=-1):
2093
+ """
2094
+ Return data from cache, or fetch pieces as necessary
2095
+
2096
+ Parameters
2097
+ ----------
2098
+ length: int (-1)
2099
+ Number of bytes to read; if <0, all remaining bytes.
2100
+ """
2101
+ length = -1 if length is None else int(length)
2102
+ if self.mode != "rb":
2103
+ raise ValueError("File not in read mode")
2104
+ if length < 0:
2105
+ length = self.size - self.loc
2106
+ if self.closed:
2107
+ raise ValueError("I/O operation on closed file.")
2108
+ if length == 0:
2109
+ # don't even bother calling fetch
2110
+ return b""
2111
+ out = self.cache._fetch(self.loc, self.loc + length)
2112
+
2113
+ logger.debug(
2114
+ "%s read: %i - %i %s",
2115
+ self,
2116
+ self.loc,
2117
+ self.loc + length,
2118
+ self.cache._log_stats(),
2119
+ )
2120
+ self.loc += len(out)
2121
+ return out
2122
+
2123
+ def readinto(self, b):
2124
+ """mirrors builtin file's readinto method
2125
+
2126
+ https://docs.python.org/3/library/io.html#io.RawIOBase.readinto
2127
+ """
2128
+ out = memoryview(b).cast("B")
2129
+ data = self.read(out.nbytes)
2130
+ out[: len(data)] = data
2131
+ return len(data)
2132
+
2133
+ def readuntil(self, char=b"\n", blocks=None):
2134
+ """Return data between current position and first occurrence of char
2135
+
2136
+ char is included in the output, except if the end of the file is
2137
+ encountered first.
2138
+
2139
+ Parameters
2140
+ ----------
2141
+ char: bytes
2142
+ Thing to find
2143
+ blocks: None or int
2144
+ How much to read in each go. Defaults to file blocksize - which may
2145
+ mean a new read on every call.
2146
+ """
2147
+ out = []
2148
+ while True:
2149
+ start = self.tell()
2150
+ part = self.read(blocks or self.blocksize)
2151
+ if len(part) == 0:
2152
+ break
2153
+ found = part.find(char)
2154
+ if found > -1:
2155
+ out.append(part[: found + len(char)])
2156
+ self.seek(start + found + len(char))
2157
+ break
2158
+ out.append(part)
2159
+ return b"".join(out)
2160
+
2161
+ def readline(self):
2162
+ """Read until and including the first occurrence of newline character
2163
+
2164
+ Note that, because of character encoding, this is not necessarily a
2165
+ true line ending.
2166
+ """
2167
+ return self.readuntil(b"\n")
2168
+
2169
+ def __next__(self):
2170
+ out = self.readline()
2171
+ if out:
2172
+ return out
2173
+ raise StopIteration
2174
+
2175
+ def __iter__(self):
2176
+ return self
2177
+
2178
+ def readlines(self):
2179
+ """Return all data, split by the newline character, including the newline character"""
2180
+ data = self.read()
2181
+ lines = data.split(b"\n")
2182
+ out = [l + b"\n" for l in lines[:-1]]
2183
+ if data.endswith(b"\n"):
2184
+ return out
2185
+ else:
2186
+ return out + [lines[-1]]
2187
+ # return list(self) ???
2188
+
2189
+ def readinto1(self, b):
2190
+ return self.readinto(b)
2191
+
2192
+ def close(self):
2193
+ """Close file
2194
+
2195
+ Finalizes writes, discards cache
2196
+ """
2197
+ if getattr(self, "_unclosable", False):
2198
+ return
2199
+ if self.closed:
2200
+ return
2201
+ try:
2202
+ if self.mode == "rb":
2203
+ self.cache = None
2204
+ else:
2205
+ if not self.forced:
2206
+ self.flush(force=True)
2207
+
2208
+ if self.fs is not None:
2209
+ self.fs.invalidate_cache(self.path)
2210
+ self.fs.invalidate_cache(self.fs._parent(self.path))
2211
+ finally:
2212
+ self.closed = True
2213
+
2214
+ def readable(self):
2215
+ """Whether opened for reading"""
2216
+ return "r" in self.mode and not self.closed
2217
+
2218
+ def seekable(self):
2219
+ """Whether is seekable (only in read mode)"""
2220
+ return self.readable()
2221
+
2222
+ def writable(self):
2223
+ """Whether opened for writing"""
2224
+ return self.mode in {"wb", "ab", "xb"} and not self.closed
2225
+
2226
+ def __reduce__(self):
2227
+ if self.mode != "rb":
2228
+ raise RuntimeError("Pickling a writeable file is not supported")
2229
+
2230
+ return reopen, (
2231
+ self.fs,
2232
+ self.path,
2233
+ self.mode,
2234
+ self.blocksize,
2235
+ self.loc,
2236
+ self.size,
2237
+ self.autocommit,
2238
+ self.cache.name if self.cache else "none",
2239
+ self.kwargs,
2240
+ )
2241
+
2242
+ def __del__(self):
2243
+ if not self.closed:
2244
+ self.close()
2245
+
2246
+ def __str__(self):
2247
+ return f"<File-like object {type(self.fs).__name__}, {self.path}>"
2248
+
2249
+ __repr__ = __str__
2250
+
2251
+ def __enter__(self):
2252
+ return self
2253
+
2254
+ def __exit__(self, *args):
2255
+ self.close()
2256
+
2257
+
2258
+ def reopen(fs, path, mode, blocksize, loc, size, autocommit, cache_type, kwargs):
2259
+ file = fs.open(
2260
+ path,
2261
+ mode=mode,
2262
+ block_size=blocksize,
2263
+ autocommit=autocommit,
2264
+ cache_type=cache_type,
2265
+ size=size,
2266
+ **kwargs,
2267
+ )
2268
+ if loc > 0:
2269
+ file.seek(loc)
2270
+ return file
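The ``AbstractFileSystem``/``AbstractBufferedFile`` pair above is the template that concrete backends fill in. A minimal usage sketch, exercising only methods shown in this file and the in-memory backend that ships with fsspec (illustrative only, not part of the committed file):

import fsspec

# instantiate a concrete implementation of AbstractFileSystem
fs = fsspec.filesystem("memory")

# pipe_file/cat_file write and read whole objects as bytes
fs.pipe_file("/data/a.txt", b"hello")
fs.pipe_file("/data/sub/b.txt", b"world")

print(fs.cat_file("/data/a.txt"))   # b'hello'
print(fs.find("/data"))             # lists both files below /data
print(fs.glob("/data/**/*.txt"))    # recursive glob over the tree

# walk yields (root, dirs, files) tuples, analogous to os.walk
for root, dirs, files in fs.walk("/data"):
    print(root, dirs, files)

# open() returns a buffered file object; text mode wraps it in TextIOWrapper
with fs.open("/data/a.txt", "rt") as f:
    print(f.read())                 # 'hello'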
venv/lib/python3.13/site-packages/fsspec/transaction.py ADDED
@@ -0,0 +1,90 @@
1
+ from collections import deque
2
+
3
+
4
+ class Transaction:
5
+ """Filesystem transaction write context
6
+
7
+ Gathers files for deferred commit or discard, so that several write
8
+ operations can be finalized semi-atomically. This works by having this
9
+ instance as the ``.transaction`` attribute of the given filesystem
10
+ """
11
+
12
+ def __init__(self, fs, **kwargs):
13
+ """
14
+ Parameters
15
+ ----------
16
+ fs: FileSystem instance
17
+ """
18
+ self.fs = fs
19
+ self.files = deque()
20
+
21
+ def __enter__(self):
22
+ self.start()
23
+ return self
24
+
25
+ def __exit__(self, exc_type, exc_val, exc_tb):
26
+ """End transaction and commit, if exit is not due to exception"""
27
+ # only commit if there was no exception
28
+ self.complete(commit=exc_type is None)
29
+ if self.fs:
30
+ self.fs._intrans = False
31
+ self.fs._transaction = None
32
+ self.fs = None
33
+
34
+ def start(self):
35
+ """Start a transaction on this FileSystem"""
36
+ self.files = deque() # clean up after previous failed completions
37
+ self.fs._intrans = True
38
+
39
+ def complete(self, commit=True):
40
+ """Finish transaction: commit or discard all deferred files"""
41
+ while self.files:
42
+ f = self.files.popleft()
43
+ if commit:
44
+ f.commit()
45
+ else:
46
+ f.discard()
47
+ self.fs._intrans = False
48
+ self.fs._transaction = None
49
+ self.fs = None
50
+
51
+
52
+ class FileActor:
53
+ def __init__(self):
54
+ self.files = []
55
+
56
+ def commit(self):
57
+ for f in self.files:
58
+ f.commit()
59
+ self.files.clear()
60
+
61
+ def discard(self):
62
+ for f in self.files:
63
+ f.discard()
64
+ self.files.clear()
65
+
66
+ def append(self, f):
67
+ self.files.append(f)
68
+
69
+
70
+ class DaskTransaction(Transaction):
71
+ def __init__(self, fs):
72
+ """
73
+ Parameters
74
+ ----------
75
+ fs: FileSystem instance
76
+ """
77
+ import distributed
78
+
79
+ super().__init__(fs)
80
+ client = distributed.default_client()
81
+ self.files = client.submit(FileActor, actor=True).result()
82
+
83
+ def complete(self, commit=True):
84
+ """Finish transaction: commit or discard all deferred files"""
85
+ if commit:
86
+ self.files.commit().result()
87
+ else:
88
+ self.files.discard().result()
89
+ self.fs._intrans = False
90
+ self.fs = None
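A rough sketch of how the ``Transaction`` class above is normally driven through a filesystem's ``transaction`` attribute, again using the in-memory backend for illustration (behavior of the commit/discard hooks ultimately depends on the concrete backend; not part of the committed file):

import fsspec

fs = fsspec.filesystem("memory")

# Files opened for writing inside the block are only committed when the
# block exits without an exception; on error they are discarded instead.
with fs.transaction:
    with fs.open("/staging/out.bin", "wb") as f:
        f.write(b"payload")

print(fs.cat_file("/staging/out.bin"))  # b'payload' once the transaction commits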
venv/lib/python3.13/site-packages/fsspec/utils.py ADDED
@@ -0,0 +1,737 @@
1
+ from __future__ import annotations
2
+
3
+ import contextlib
4
+ import logging
5
+ import math
6
+ import os
7
+ import re
8
+ import sys
9
+ import tempfile
10
+ from collections.abc import Iterable, Iterator, Sequence
11
+ from functools import partial
12
+ from hashlib import md5
13
+ from importlib.metadata import version
14
+ from typing import (
15
+ IO,
16
+ TYPE_CHECKING,
17
+ Any,
18
+ Callable,
19
+ TypeVar,
20
+ )
21
+ from urllib.parse import urlsplit
22
+
23
+ if TYPE_CHECKING:
24
+ import pathlib
25
+
26
+ from typing_extensions import TypeGuard
27
+
28
+ from fsspec.spec import AbstractFileSystem
29
+
30
+
31
+ DEFAULT_BLOCK_SIZE = 5 * 2**20
32
+
33
+ T = TypeVar("T")
34
+
35
+
36
+ def infer_storage_options(
37
+ urlpath: str, inherit_storage_options: dict[str, Any] | None = None
38
+ ) -> dict[str, Any]:
39
+ """Infer storage options from URL path and merge it with existing storage
40
+ options.
41
+
42
+ Parameters
43
+ ----------
44
+ urlpath: str or unicode
45
+ Either local absolute file path or URL (hdfs://namenode:8020/file.csv)
46
+ inherit_storage_options: dict (optional)
47
+ Its contents will get merged with the inferred information from the
48
+ given path
49
+
50
+ Returns
51
+ -------
52
+ Storage options dict.
53
+
54
+ Examples
55
+ --------
56
+ >>> infer_storage_options('/mnt/datasets/test.csv') # doctest: +SKIP
57
+ {"protocol": "file", "path", "/mnt/datasets/test.csv"}
58
+ >>> infer_storage_options(
59
+ ... 'hdfs://username:pwd@node:123/mnt/datasets/test.csv?q=1',
60
+ ... inherit_storage_options={'extra': 'value'},
61
+ ... ) # doctest: +SKIP
62
+ {"protocol": "hdfs", "username": "username", "password": "pwd",
63
+ "host": "node", "port": 123, "path": "/mnt/datasets/test.csv",
64
+ "url_query": "q=1", "extra": "value"}
65
+ """
66
+ # Handle Windows paths including disk name in this special case
67
+ if (
68
+ re.match(r"^[a-zA-Z]:[\\/]", urlpath)
69
+ or re.match(r"^[a-zA-Z0-9]+://", urlpath) is None
70
+ ):
71
+ return {"protocol": "file", "path": urlpath}
72
+
73
+ parsed_path = urlsplit(urlpath)
74
+ protocol = parsed_path.scheme or "file"
75
+ if parsed_path.fragment:
76
+ path = "#".join([parsed_path.path, parsed_path.fragment])
77
+ else:
78
+ path = parsed_path.path
79
+ if protocol == "file":
80
+ # Special case parsing file protocol URL on Windows according to:
81
+ # https://msdn.microsoft.com/en-us/library/jj710207.aspx
82
+ windows_path = re.match(r"^/([a-zA-Z])[:|]([\\/].*)$", path)
83
+ if windows_path:
84
+ drive, path = windows_path.groups()
85
+ path = f"{drive}:{path}"
86
+
87
+ if protocol in ["http", "https"]:
88
+ # for HTTP, we don't want to parse, as requests will anyway
89
+ return {"protocol": protocol, "path": urlpath}
90
+
91
+ options: dict[str, Any] = {"protocol": protocol, "path": path}
92
+
93
+ if parsed_path.netloc:
94
+ # Parse `hostname` from netloc manually because `parsed_path.hostname`
95
+ # lowercases the hostname which is not always desirable (e.g. in S3):
96
+ # https://github.com/dask/dask/issues/1417
97
+ options["host"] = parsed_path.netloc.rsplit("@", 1)[-1].rsplit(":", 1)[0]
98
+
99
+ if protocol in ("s3", "s3a", "gcs", "gs"):
100
+ options["path"] = options["host"] + options["path"]
101
+ else:
102
+ options["host"] = options["host"]
103
+ if parsed_path.port:
104
+ options["port"] = parsed_path.port
105
+ if parsed_path.username:
106
+ options["username"] = parsed_path.username
107
+ if parsed_path.password:
108
+ options["password"] = parsed_path.password
109
+
110
+ if parsed_path.query:
111
+ options["url_query"] = parsed_path.query
112
+ if parsed_path.fragment:
113
+ options["url_fragment"] = parsed_path.fragment
114
+
115
+ if inherit_storage_options:
116
+ update_storage_options(options, inherit_storage_options)
117
+
118
+ return options
119
+
120
+
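For quick reference, the behaviour described in the docstring above as a plain call (values taken from that docstring):

from fsspec.utils import infer_storage_options

infer_storage_options("hdfs://username:pwd@node:123/mnt/datasets/test.csv?q=1")
# {'protocol': 'hdfs', 'path': '/mnt/datasets/test.csv', 'host': 'node',
#  'port': 123, 'username': 'username', 'password': 'pwd', 'url_query': 'q=1'}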
121
+ def update_storage_options(
122
+ options: dict[str, Any], inherited: dict[str, Any] | None = None
123
+ ) -> None:
124
+ if not inherited:
125
+ inherited = {}
126
+ collisions = set(options) & set(inherited)
127
+ if collisions:
128
+ for collision in collisions:
129
+ if options.get(collision) != inherited.get(collision):
130
+ raise KeyError(
131
+ f"Collision between inferred and specified storage "
132
+ f"option:\n{collision}"
133
+ )
134
+ options.update(inherited)
135
+
136
+
137
+ # Compression extensions registered via fsspec.compression.register_compression
138
+ compressions: dict[str, str] = {}
139
+
140
+
141
+ def infer_compression(filename: str) -> str | None:
142
+ """Infer compression, if available, from filename.
143
+
144
+ Infer a named compression type, if registered and available, from filename
145
+ extension. This includes builtin (gz, bz2, zip) compressions, as well as
146
+ optional compressions. See fsspec.compression.register_compression.
147
+ """
148
+ extension = os.path.splitext(filename)[-1].strip(".").lower()
149
+ if extension in compressions:
150
+ return compressions[extension]
151
+ return None
152
+
153
+
154
+ def build_name_function(max_int: float) -> Callable[[int], str]:
155
+ """Returns a function that receives a single integer
156
+ and returns it as a string padded by enough zero characters
157
+ to align with maximum possible integer
158
+
159
+ >>> name_f = build_name_function(57)
160
+
161
+ >>> name_f(7)
162
+ '07'
163
+ >>> name_f(31)
164
+ '31'
165
+ >>> build_name_function(1000)(42)
166
+ '0042'
167
+ >>> build_name_function(999)(42)
168
+ '042'
169
+ >>> build_name_function(0)(0)
170
+ '0'
171
+ """
172
+ # handle corner cases max_int is 0 or exact power of 10
173
+ max_int += 1e-8
174
+
175
+ pad_length = int(math.ceil(math.log10(max_int)))
176
+
177
+ def name_function(i: int) -> str:
178
+ return str(i).zfill(pad_length)
179
+
180
+ return name_function
181
+
182
+
183
+ def seek_delimiter(file: IO[bytes], delimiter: bytes, blocksize: int) -> bool:
184
+ r"""Seek current file to file start, file end, or byte after delimiter seq.
185
+
186
+ Seeks file to next chunk delimiter, where chunks are defined on file start,
187
+ a delimiting sequence, and file end. Use file.tell() to see location afterwards.
188
+ Note that file start is a valid split, so must be at offset > 0 to seek for
189
+ delimiter.
190
+
191
+ Parameters
192
+ ----------
193
+ file: a file
194
+ delimiter: bytes
195
+ a delimiter like ``b'\n'`` or message sentinel, matching file .read() type
196
+ blocksize: int
197
+ Number of bytes to read from the file at once.
198
+
199
+
200
+ Returns
201
+ -------
202
+ Returns True if a delimiter was found, False if at file start or end.
203
+
204
+ """
205
+
206
+ if file.tell() == 0:
207
+ # beginning-of-file, return without seek
208
+ return False
209
+
210
+ # Interface is for binary IO, with delimiter as bytes, but initialize last
211
+ # with result of file.read to preserve compatibility with text IO.
212
+ last: bytes | None = None
213
+ while True:
214
+ current = file.read(blocksize)
215
+ if not current:
216
+ # end-of-file without delimiter
217
+ return False
218
+ full = last + current if last else current
219
+ try:
220
+ if delimiter in full:
221
+ i = full.index(delimiter)
222
+ file.seek(file.tell() - (len(full) - i) + len(delimiter))
223
+ return True
224
+ elif len(current) < blocksize:
225
+ # end-of-file without delimiter
226
+ return False
227
+ except (OSError, ValueError):
228
+ pass
229
+ last = full[-len(delimiter) :]
230
+
231
+
232
+ def read_block(
233
+ f: IO[bytes],
234
+ offset: int,
235
+ length: int | None,
236
+ delimiter: bytes | None = None,
237
+ split_before: bool = False,
238
+ ) -> bytes:
239
+ """Read a block of bytes from a file
240
+
241
+ Parameters
242
+ ----------
243
+ f: File
244
+ Open file
245
+ offset: int
246
+ Byte offset to start read
247
+ length: int
248
+ Number of bytes to read, read through end of file if None
249
+ delimiter: bytes (optional)
250
+ Ensure reading starts and stops at delimiter bytestring
251
+ split_before: bool (optional)
252
+ Start/stop read *before* delimiter bytestring.
253
+
254
+
255
+ If using the ``delimiter=`` keyword argument we ensure that the read
256
+ starts and stops at delimiter boundaries that follow the locations
257
+ ``offset`` and ``offset + length``. If ``offset`` is zero then we
258
+ start at zero, regardless of delimiter. The bytestring returned WILL
259
+ include the terminating delimiter string.
260
+
261
+ Examples
262
+ --------
263
+
264
+ >>> from io import BytesIO # doctest: +SKIP
265
+ >>> f = BytesIO(b'Alice, 100\\nBob, 200\\nCharlie, 300') # doctest: +SKIP
266
+ >>> read_block(f, 0, 13) # doctest: +SKIP
267
+ b'Alice, 100\\nBo'
268
+
269
+ >>> read_block(f, 0, 13, delimiter=b'\\n') # doctest: +SKIP
270
+ b'Alice, 100\\nBob, 200\\n'
271
+
272
+ >>> read_block(f, 10, 10, delimiter=b'\\n') # doctest: +SKIP
273
+ b'Bob, 200\\nCharlie, 300'
274
+ """
275
+ if delimiter:
276
+ f.seek(offset)
277
+ found_start_delim = seek_delimiter(f, delimiter, 2**16)
278
+ if length is None:
279
+ return f.read()
280
+ start = f.tell()
281
+ length -= start - offset
282
+
283
+ f.seek(start + length)
284
+ found_end_delim = seek_delimiter(f, delimiter, 2**16)
285
+ end = f.tell()
286
+
287
+ # Adjust split location to before delimiter if seek found the
288
+ # delimiter sequence, not start or end of file.
289
+ if found_start_delim and split_before:
290
+ start -= len(delimiter)
291
+
292
+ if found_end_delim and split_before:
293
+ end -= len(delimiter)
294
+
295
+ offset = start
296
+ length = end - start
297
+
298
+ f.seek(offset)
299
+
300
+ # TODO: allow length to be None and read to the end of the file?
301
+ assert length is not None
302
+ b = f.read(length)
303
+ return b
304
+
305
+
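The doctest in the read_block docstring runs as-is; a compact, self-contained version:

from io import BytesIO
from fsspec.utils import read_block

f = BytesIO(b"Alice, 100\nBob, 200\nCharlie, 300")
read_block(f, 0, 13)                    # b'Alice, 100\nBo'
read_block(f, 0, 13, delimiter=b"\n")   # b'Alice, 100\nBob, 200\n'
read_block(f, 10, 10, delimiter=b"\n")  # b'Bob, 200\nCharlie, 300'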
306
+ def tokenize(*args: Any, **kwargs: Any) -> str:
307
+ """Deterministic token
308
+
309
+ (modified from dask.base)
310
+
311
+ >>> tokenize([1, 2, '3'])
312
+ '9d71491b50023b06fc76928e6eddb952'
313
+
314
+ >>> tokenize('Hello') == tokenize('Hello')
315
+ True
316
+ """
317
+ if kwargs:
318
+ args += (kwargs,)
319
+ try:
320
+ h = md5(str(args).encode())
321
+ except ValueError:
322
+ # FIPS systems: https://github.com/fsspec/filesystem_spec/issues/380
323
+ h = md5(str(args).encode(), usedforsecurity=False)
324
+ return h.hexdigest()
325
+
326
+
327
+ def stringify_path(filepath: str | os.PathLike[str] | pathlib.Path) -> str:
328
+ """Attempt to convert a path-like object to a string.
329
+
330
+ Parameters
331
+ ----------
332
+ filepath: object to be converted
333
+
334
+ Returns
335
+ -------
336
+ filepath_str: maybe a string version of the object
337
+
338
+ Notes
339
+ -----
340
+ Objects supporting the fspath protocol are coerced according to its
341
+ __fspath__ method.
342
+
343
+ For backwards compatibility with older Python version, pathlib.Path
344
+ objects are specially coerced.
345
+
346
+ Any other object is passed through unchanged, which includes bytes,
347
+ strings, buffers, or anything else that's not even path-like.
348
+ """
349
+ if isinstance(filepath, str):
350
+ return filepath
351
+ elif hasattr(filepath, "__fspath__"):
352
+ return filepath.__fspath__()
353
+ elif hasattr(filepath, "path"):
354
+ return filepath.path
355
+ else:
356
+ return filepath # type: ignore[return-value]
357
+
358
+
359
+ def make_instance(
360
+ cls: Callable[..., T], args: Sequence[Any], kwargs: dict[str, Any]
361
+ ) -> T:
362
+ inst = cls(*args, **kwargs)
363
+ inst._determine_worker() # type: ignore[attr-defined]
364
+ return inst
365
+
366
+
367
+ def common_prefix(paths: Iterable[str]) -> str:
368
+ """For a list of paths, find the shortest prefix common to all"""
369
+ parts = [p.split("/") for p in paths]
370
+ lmax = min(len(p) for p in parts)
371
+ end = 0
372
+ for i in range(lmax):
373
+ end = all(p[i] == parts[0][i] for p in parts)
374
+ if not end:
375
+ break
376
+ i += end
377
+ return "/".join(parts[0][:i])
378
+
379
+
380
+ def other_paths(
381
+ paths: list[str],
382
+ path2: str | list[str],
383
+ exists: bool = False,
384
+ flatten: bool = False,
385
+ ) -> list[str]:
386
+ """In bulk file operations, construct a new file tree from a list of files
387
+
388
+ Parameters
389
+ ----------
390
+ paths: list of str
391
+ The input file tree
392
+ path2: str or list of str
393
+ Root to construct the new list in. If this is already a list of str, we just
394
+ assert it has the right number of elements.
395
+ exists: bool (optional)
396
+ For a str destination, it is already exists (and is a dir), files should
397
+ end up inside.
398
+ flatten: bool (optional)
399
+ Whether to flatten the input directory tree structure so that the output files
400
+ are in the same directory.
401
+
402
+ Returns
403
+ -------
404
+ list of str
405
+ """
406
+
407
+ if isinstance(path2, str):
408
+ path2 = path2.rstrip("/")
409
+
410
+ if flatten:
411
+ path2 = ["/".join((path2, p.split("/")[-1])) for p in paths]
412
+ else:
413
+ cp = common_prefix(paths)
414
+ if exists:
415
+ cp = cp.rsplit("/", 1)[0]
416
+ if not cp and all(not s.startswith("/") for s in paths):
417
+ path2 = ["/".join([path2, p]) for p in paths]
418
+ else:
419
+ path2 = [p.replace(cp, path2, 1) for p in paths]
420
+ else:
421
+ assert len(paths) == len(path2)
422
+ return path2
423
+
424
+
425
+ def is_exception(obj: Any) -> bool:
426
+ return isinstance(obj, BaseException)
427
+
428
+
429
+ def isfilelike(f: Any) -> TypeGuard[IO[bytes]]:
430
+ return all(hasattr(f, attr) for attr in ["read", "close", "tell"])
431
+
432
+
433
+ def get_protocol(url: str) -> str:
434
+ url = stringify_path(url)
435
+ parts = re.split(r"(\:\:|\://)", url, maxsplit=1)
436
+ if len(parts) > 1:
437
+ return parts[0]
438
+ return "file"
439
+
440
+
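A few hedged examples of how get_protocol splits URLs (the bucket and key names are made up): chained "::" URLs resolve to the outermost protocol, and anything without a scheme falls back to "file".

from fsspec.utils import get_protocol

get_protocol("s3://bucket/key.parquet")       # 's3'
get_protocol("simplecache::s3://bucket/key")  # 'simplecache'
get_protocol("relative/path.txt")             # 'file'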
441
+ def can_be_local(path: str) -> bool:
442
+ """Can the given URL be used with open_local?"""
443
+ from fsspec import get_filesystem_class
444
+
445
+ try:
446
+ return getattr(get_filesystem_class(get_protocol(path)), "local_file", False)
447
+ except (ValueError, ImportError):
448
+ # not in registry or import failed
449
+ return False
450
+
451
+
452
+ def get_package_version_without_import(name: str) -> str | None:
453
+ """For given package name, try to find the version without importing it
454
+
455
+ Import and package.__version__ is still the backup here, so an import
456
+ *might* happen.
457
+
458
+ Returns either the version string, or None if the package
459
+ or the version was not readily found.
460
+ """
461
+ if name in sys.modules:
462
+ mod = sys.modules[name]
463
+ if hasattr(mod, "__version__"):
464
+ return mod.__version__
465
+ try:
466
+ return version(name)
467
+ except: # noqa: E722
468
+ pass
469
+ try:
470
+ import importlib
471
+
472
+ mod = importlib.import_module(name)
473
+ return mod.__version__
474
+ except (ImportError, AttributeError):
475
+ return None
476
+
477
+
478
+ def setup_logging(
479
+ logger: logging.Logger | None = None,
480
+ logger_name: str | None = None,
481
+ level: str = "DEBUG",
482
+ clear: bool = True,
483
+ ) -> logging.Logger:
484
+ if logger is None and logger_name is None:
485
+ raise ValueError("Provide either logger object or logger name")
486
+ logger = logger or logging.getLogger(logger_name)
487
+ handle = logging.StreamHandler()
488
+ formatter = logging.Formatter(
489
+ "%(asctime)s - %(name)s - %(levelname)s - %(funcName)s -- %(message)s"
490
+ )
491
+ handle.setFormatter(formatter)
492
+ if clear:
493
+ logger.handlers.clear()
494
+ logger.addHandler(handle)
495
+ logger.setLevel(level)
496
+ return logger
497
+
498
+
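A minimal sketch of wiring up setup_logging; the logger name is an assumption (any dotted name works) and the call simply attaches a stream handler using the formatter defined above.

from fsspec.utils import setup_logging

logger = setup_logging(logger_name="fsspec", level="INFO")
logger.info("handler and formatter attached")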
499
+ def _unstrip_protocol(name: str, fs: AbstractFileSystem) -> str:
500
+ return fs.unstrip_protocol(name)
501
+
502
+
503
+ def mirror_from(
504
+ origin_name: str, methods: Iterable[str]
505
+ ) -> Callable[[type[T]], type[T]]:
506
+ """Mirror attributes and methods from the given
507
+ origin_name attribute of the instance to the
508
+ decorated class"""
509
+
510
+ def origin_getter(method: str, self: Any) -> Any:
511
+ origin = getattr(self, origin_name)
512
+ return getattr(origin, method)
513
+
514
+ def wrapper(cls: type[T]) -> type[T]:
515
+ for method in methods:
516
+ wrapped_method = partial(origin_getter, method)
517
+ setattr(cls, method, property(wrapped_method))
518
+ return cls
519
+
520
+ return wrapper
521
+
522
+
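A small sketch of the decorator in action: each listed name becomes a property that forwards to the wrapped object. The Wrapper class is hypothetical.

from io import BytesIO
from fsspec.utils import mirror_from

@mirror_from("raw", ["read", "seek", "tell"])
class Wrapper:
    def __init__(self, raw):
        self.raw = raw

w = Wrapper(BytesIO(b"abc"))
w.read(2)   # b'ab', forwarded to the underlying BytesIO
w.tell()    # 2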
523
+ @contextlib.contextmanager
524
+ def nullcontext(obj: T) -> Iterator[T]:
525
+ yield obj
526
+
527
+
528
+ def merge_offset_ranges(
529
+ paths: list[str],
530
+ starts: list[int] | int,
531
+ ends: list[int] | int,
532
+ max_gap: int = 0,
533
+ max_block: int | None = None,
534
+ sort: bool = True,
535
+ ) -> tuple[list[str], list[int], list[int]]:
536
+ """Merge adjacent byte-offset ranges when the inter-range
537
+ gap is <= `max_gap`, and when the merged byte range does not
538
+ exceed `max_block` (if specified). By default, this function
539
+ will re-order the input paths and byte ranges to ensure sorted
540
+ order. If the user can guarantee that the inputs are already
541
+ sorted, passing `sort=False` will skip the re-ordering.
542
+ """
543
+ # Check input
544
+ if not isinstance(paths, list):
545
+ raise TypeError
546
+ if not isinstance(starts, list):
547
+ starts = [starts] * len(paths)
548
+ if not isinstance(ends, list):
549
+ ends = [ends] * len(paths)
550
+ if len(starts) != len(paths) or len(ends) != len(paths):
551
+ raise ValueError
552
+
553
+ # Early Return
554
+ if len(starts) <= 1:
555
+ return paths, starts, ends
556
+
557
+ starts = [s or 0 for s in starts]
558
+ # Sort by paths and then ranges if `sort=True`
559
+ if sort:
560
+ paths, starts, ends = (
561
+ list(v)
562
+ for v in zip(
563
+ *sorted(
564
+ zip(paths, starts, ends),
565
+ )
566
+ )
567
+ )
568
+
569
+ if paths:
570
+ # Loop through the coupled `paths`, `starts`, and
571
+ # `ends`, and merge adjacent blocks when appropriate
572
+ new_paths = paths[:1]
573
+ new_starts = starts[:1]
574
+ new_ends = ends[:1]
575
+ for i in range(1, len(paths)):
576
+ if paths[i] == paths[i - 1] and new_ends[-1] is None:
577
+ continue
578
+ elif (
579
+ paths[i] != paths[i - 1]
580
+ or ((starts[i] - new_ends[-1]) > max_gap)
581
+ or (max_block is not None and (ends[i] - new_starts[-1]) > max_block)
582
+ ):
583
+ # Cannot merge with previous block.
584
+ # Add new `paths`, `starts`, and `ends` elements
585
+ new_paths.append(paths[i])
586
+ new_starts.append(starts[i])
587
+ new_ends.append(ends[i])
588
+ else:
589
+ # Merge with previous block by updating the
590
+ # last element of `ends`
591
+ new_ends[-1] = ends[i]
592
+ return new_paths, new_starts, new_ends
593
+
594
+ # `paths` is empty. Just return input lists
595
+ return paths, starts, ends
596
+
597
+
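A worked example under the defaults described above: two ranges in the same file separated by a small gap are coalesced, while a distant range stays separate (the path and offsets are made up).

from fsspec.utils import merge_offset_ranges

paths  = ["part.parquet", "part.parquet", "part.parquet"]
starts = [0, 110, 10_000]
ends   = [100, 200, 10_100]
merge_offset_ranges(paths, starts, ends, max_gap=32)
# (['part.parquet', 'part.parquet'], [0, 10000], [200, 10100])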
598
+ def file_size(filelike: IO[bytes]) -> int:
599
+ """Find length of any open read-mode file-like"""
600
+ pos = filelike.tell()
601
+ try:
602
+ return filelike.seek(0, 2)
603
+ finally:
604
+ filelike.seek(pos)
605
+
606
+
607
+ @contextlib.contextmanager
608
+ def atomic_write(path: str, mode: str = "wb"):
609
+ """
610
+ A context manager that opens a temporary file next to `path` and, on exit,
611
+ replaces `path` with the temporary file, thereby updating `path`
612
+ atomically.
613
+ """
614
+ fd, fn = tempfile.mkstemp(
615
+ dir=os.path.dirname(path), prefix=os.path.basename(path) + "-"
616
+ )
617
+ try:
618
+ with open(fd, mode) as fp:
619
+ yield fp
620
+ except BaseException:
621
+ with contextlib.suppress(FileNotFoundError):
622
+ os.unlink(fn)
623
+ raise
624
+ else:
625
+ os.replace(fn, path)
626
+
627
+
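A short usage sketch (hypothetical path): readers of the target never observe a half-written file, because the temporary file is only os.replace()'d over it when the block succeeds.

from fsspec.utils import atomic_write

with atomic_write("/tmp/manifest.json") as f:  # default mode is "wb"
    f.write(b'{"entries": 0}')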
628
+ def _translate(pat, STAR, QUESTION_MARK):
629
+ # Copied from: https://github.com/python/cpython/pull/106703.
630
+ res: list[str] = []
631
+ add = res.append
632
+ i, n = 0, len(pat)
633
+ while i < n:
634
+ c = pat[i]
635
+ i = i + 1
636
+ if c == "*":
637
+ # compress consecutive `*` into one
638
+ if (not res) or res[-1] is not STAR:
639
+ add(STAR)
640
+ elif c == "?":
641
+ add(QUESTION_MARK)
642
+ elif c == "[":
643
+ j = i
644
+ if j < n and pat[j] == "!":
645
+ j = j + 1
646
+ if j < n and pat[j] == "]":
647
+ j = j + 1
648
+ while j < n and pat[j] != "]":
649
+ j = j + 1
650
+ if j >= n:
651
+ add("\\[")
652
+ else:
653
+ stuff = pat[i:j]
654
+ if "-" not in stuff:
655
+ stuff = stuff.replace("\\", r"\\")
656
+ else:
657
+ chunks = []
658
+ k = i + 2 if pat[i] == "!" else i + 1
659
+ while True:
660
+ k = pat.find("-", k, j)
661
+ if k < 0:
662
+ break
663
+ chunks.append(pat[i:k])
664
+ i = k + 1
665
+ k = k + 3
666
+ chunk = pat[i:j]
667
+ if chunk:
668
+ chunks.append(chunk)
669
+ else:
670
+ chunks[-1] += "-"
671
+ # Remove empty ranges -- invalid in RE.
672
+ for k in range(len(chunks) - 1, 0, -1):
673
+ if chunks[k - 1][-1] > chunks[k][0]:
674
+ chunks[k - 1] = chunks[k - 1][:-1] + chunks[k][1:]
675
+ del chunks[k]
676
+ # Escape backslashes and hyphens for set difference (--).
677
+ # Hyphens that create ranges shouldn't be escaped.
678
+ stuff = "-".join(
679
+ s.replace("\\", r"\\").replace("-", r"\-") for s in chunks
680
+ )
681
+ # Escape set operations (&&, ~~ and ||).
682
+ stuff = re.sub(r"([&~|])", r"\\\1", stuff)
683
+ i = j + 1
684
+ if not stuff:
685
+ # Empty range: never match.
686
+ add("(?!)")
687
+ elif stuff == "!":
688
+ # Negated empty range: match any character.
689
+ add(".")
690
+ else:
691
+ if stuff[0] == "!":
692
+ stuff = "^" + stuff[1:]
693
+ elif stuff[0] in ("^", "["):
694
+ stuff = "\\" + stuff
695
+ add(f"[{stuff}]")
696
+ else:
697
+ add(re.escape(c))
698
+ assert i == n
699
+ return res
700
+
701
+
702
+ def glob_translate(pat):
703
+ # Copied from: https://github.com/python/cpython/pull/106703.
704
+ # The keyword parameters' values are fixed to:
705
+ # recursive=True, include_hidden=True, seps=None
706
+ """Translate a pathname with shell wildcards to a regular expression."""
707
+ if os.path.altsep:
708
+ seps = os.path.sep + os.path.altsep
709
+ else:
710
+ seps = os.path.sep
711
+ escaped_seps = "".join(map(re.escape, seps))
712
+ any_sep = f"[{escaped_seps}]" if len(seps) > 1 else escaped_seps
713
+ not_sep = f"[^{escaped_seps}]"
714
+ one_last_segment = f"{not_sep}+"
715
+ one_segment = f"{one_last_segment}{any_sep}"
716
+ any_segments = f"(?:.+{any_sep})?"
717
+ any_last_segments = ".*"
718
+ results = []
719
+ parts = re.split(any_sep, pat)
720
+ last_part_idx = len(parts) - 1
721
+ for idx, part in enumerate(parts):
722
+ if part == "*":
723
+ results.append(one_segment if idx < last_part_idx else one_last_segment)
724
+ continue
725
+ if part == "**":
726
+ results.append(any_segments if idx < last_part_idx else any_last_segments)
727
+ continue
728
+ elif "**" in part:
729
+ raise ValueError(
730
+ "Invalid pattern: '**' can only be an entire path component"
731
+ )
732
+ if part:
733
+ results.extend(_translate(part, f"{not_sep}*", not_sep))
734
+ if idx < last_part_idx:
735
+ results.append(any_sep)
736
+ res = "".join(results)
737
+ return rf"(?s:{res})\Z"
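A hedged example of the translation on POSIX-style separators; the pattern and paths are made up.

import re
from fsspec.utils import glob_translate

rx = re.compile(glob_translate("data/**/*.csv"))
bool(rx.match("data/2024/01/part.csv"))   # True
bool(rx.match("data/2024/01/part.json"))  # False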
venv/lib/python3.13/site-packages/hf_xet/__init__.py ADDED
@@ -0,0 +1,5 @@
1
+ from .hf_xet import *
2
+
3
+ __doc__ = hf_xet.__doc__
4
+ if hasattr(hf_xet, "__all__"):
5
+ __all__ = hf_xet.__all__
venv/lib/python3.13/site-packages/huggingface_hub/__init__.py ADDED
@@ -0,0 +1,1548 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # ***********
16
+ # `huggingface_hub` init has 2 modes:
17
+ # - Normal usage:
18
+ # If imported to use it, all modules and functions are lazy-loaded. This means
19
+ # they exist at top level in module but are imported only the first time they are
20
+ # used. This way, `from huggingface_hub import something` will import `something`
21
+ # quickly without the hassle of importing all the features from `huggingface_hub`.
22
+ # - Static check:
23
+ # If statically analyzed, all modules and functions are loaded normally. This way
24
+ # static typing check works properly as well as autocomplete in text editors and
25
+ # IDEs.
26
+ #
27
+ # The static model imports are done inside the `if TYPE_CHECKING:` statement at
28
+ # the bottom of this file. Since module/functions imports are duplicated, it is
29
+ # mandatory to make sure to add them twice when adding one. This is checked in the
30
+ # `make quality` command.
31
+ #
32
+ # To update the static imports, please run the following command and commit the changes.
33
+ # ```
34
+ # # Use script
35
+ # python utils/check_static_imports.py --update-file
36
+ #
37
+ # # Or run style on codebase
38
+ # make style
39
+ # ```
40
+ #
41
+ # ***********
42
+ # Lazy loader vendored from https://github.com/scientific-python/lazy_loader
43
+ import importlib
44
+ import os
45
+ import sys
46
+ from typing import TYPE_CHECKING
47
+
48
+
49
+ __version__ = "0.35.1"
50
+
51
+ # Alphabetical order of definitions is ensured in tests
52
+ # WARNING: any comment added in this dictionary definition will be lost when
53
+ # re-generating the file !
54
+ _SUBMOD_ATTRS = {
55
+ "_commit_scheduler": [
56
+ "CommitScheduler",
57
+ ],
58
+ "_inference_endpoints": [
59
+ "InferenceEndpoint",
60
+ "InferenceEndpointError",
61
+ "InferenceEndpointStatus",
62
+ "InferenceEndpointTimeoutError",
63
+ "InferenceEndpointType",
64
+ ],
65
+ "_jobs_api": [
66
+ "JobInfo",
67
+ "JobOwner",
68
+ "JobStage",
69
+ "JobStatus",
70
+ ],
71
+ "_login": [
72
+ "auth_list",
73
+ "auth_switch",
74
+ "interpreter_login",
75
+ "login",
76
+ "logout",
77
+ "notebook_login",
78
+ ],
79
+ "_oauth": [
80
+ "OAuthInfo",
81
+ "OAuthOrgInfo",
82
+ "OAuthUserInfo",
83
+ "attach_huggingface_oauth",
84
+ "parse_huggingface_oauth",
85
+ ],
86
+ "_snapshot_download": [
87
+ "snapshot_download",
88
+ ],
89
+ "_space_api": [
90
+ "SpaceHardware",
91
+ "SpaceRuntime",
92
+ "SpaceStage",
93
+ "SpaceStorage",
94
+ "SpaceVariable",
95
+ ],
96
+ "_tensorboard_logger": [
97
+ "HFSummaryWriter",
98
+ ],
99
+ "_webhooks_payload": [
100
+ "WebhookPayload",
101
+ "WebhookPayloadComment",
102
+ "WebhookPayloadDiscussion",
103
+ "WebhookPayloadDiscussionChanges",
104
+ "WebhookPayloadEvent",
105
+ "WebhookPayloadMovedTo",
106
+ "WebhookPayloadRepo",
107
+ "WebhookPayloadUrl",
108
+ "WebhookPayloadWebhook",
109
+ ],
110
+ "_webhooks_server": [
111
+ "WebhooksServer",
112
+ "webhook_endpoint",
113
+ ],
114
+ "community": [
115
+ "Discussion",
116
+ "DiscussionComment",
117
+ "DiscussionCommit",
118
+ "DiscussionEvent",
119
+ "DiscussionStatusChange",
120
+ "DiscussionTitleChange",
121
+ "DiscussionWithDetails",
122
+ ],
123
+ "constants": [
124
+ "CONFIG_NAME",
125
+ "FLAX_WEIGHTS_NAME",
126
+ "HUGGINGFACE_CO_URL_HOME",
127
+ "HUGGINGFACE_CO_URL_TEMPLATE",
128
+ "PYTORCH_WEIGHTS_NAME",
129
+ "REPO_TYPE_DATASET",
130
+ "REPO_TYPE_MODEL",
131
+ "REPO_TYPE_SPACE",
132
+ "TF2_WEIGHTS_NAME",
133
+ "TF_WEIGHTS_NAME",
134
+ ],
135
+ "fastai_utils": [
136
+ "_save_pretrained_fastai",
137
+ "from_pretrained_fastai",
138
+ "push_to_hub_fastai",
139
+ ],
140
+ "file_download": [
141
+ "HfFileMetadata",
142
+ "_CACHED_NO_EXIST",
143
+ "get_hf_file_metadata",
144
+ "hf_hub_download",
145
+ "hf_hub_url",
146
+ "try_to_load_from_cache",
147
+ ],
148
+ "hf_api": [
149
+ "Collection",
150
+ "CollectionItem",
151
+ "CommitInfo",
152
+ "CommitOperation",
153
+ "CommitOperationAdd",
154
+ "CommitOperationCopy",
155
+ "CommitOperationDelete",
156
+ "DatasetInfo",
157
+ "GitCommitInfo",
158
+ "GitRefInfo",
159
+ "GitRefs",
160
+ "HfApi",
161
+ "ModelInfo",
162
+ "RepoUrl",
163
+ "SpaceInfo",
164
+ "User",
165
+ "UserLikes",
166
+ "WebhookInfo",
167
+ "WebhookWatchedItem",
168
+ "accept_access_request",
169
+ "add_collection_item",
170
+ "add_space_secret",
171
+ "add_space_variable",
172
+ "auth_check",
173
+ "cancel_access_request",
174
+ "cancel_job",
175
+ "change_discussion_status",
176
+ "comment_discussion",
177
+ "create_branch",
178
+ "create_collection",
179
+ "create_commit",
180
+ "create_discussion",
181
+ "create_inference_endpoint",
182
+ "create_inference_endpoint_from_catalog",
183
+ "create_pull_request",
184
+ "create_repo",
185
+ "create_scheduled_job",
186
+ "create_scheduled_uv_job",
187
+ "create_tag",
188
+ "create_webhook",
189
+ "dataset_info",
190
+ "delete_branch",
191
+ "delete_collection",
192
+ "delete_collection_item",
193
+ "delete_file",
194
+ "delete_folder",
195
+ "delete_inference_endpoint",
196
+ "delete_repo",
197
+ "delete_scheduled_job",
198
+ "delete_space_secret",
199
+ "delete_space_storage",
200
+ "delete_space_variable",
201
+ "delete_tag",
202
+ "delete_webhook",
203
+ "disable_webhook",
204
+ "duplicate_space",
205
+ "edit_discussion_comment",
206
+ "enable_webhook",
207
+ "fetch_job_logs",
208
+ "file_exists",
209
+ "get_collection",
210
+ "get_dataset_tags",
211
+ "get_discussion_details",
212
+ "get_full_repo_name",
213
+ "get_inference_endpoint",
214
+ "get_model_tags",
215
+ "get_paths_info",
216
+ "get_repo_discussions",
217
+ "get_safetensors_metadata",
218
+ "get_space_runtime",
219
+ "get_space_variables",
220
+ "get_token_permission",
221
+ "get_user_overview",
222
+ "get_webhook",
223
+ "grant_access",
224
+ "inspect_job",
225
+ "inspect_scheduled_job",
226
+ "list_accepted_access_requests",
227
+ "list_collections",
228
+ "list_datasets",
229
+ "list_inference_catalog",
230
+ "list_inference_endpoints",
231
+ "list_jobs",
232
+ "list_lfs_files",
233
+ "list_liked_repos",
234
+ "list_models",
235
+ "list_organization_members",
236
+ "list_papers",
237
+ "list_pending_access_requests",
238
+ "list_rejected_access_requests",
239
+ "list_repo_commits",
240
+ "list_repo_files",
241
+ "list_repo_likers",
242
+ "list_repo_refs",
243
+ "list_repo_tree",
244
+ "list_spaces",
245
+ "list_user_followers",
246
+ "list_user_following",
247
+ "list_webhooks",
248
+ "merge_pull_request",
249
+ "model_info",
250
+ "move_repo",
251
+ "paper_info",
252
+ "parse_safetensors_file_metadata",
253
+ "pause_inference_endpoint",
254
+ "pause_space",
255
+ "permanently_delete_lfs_files",
256
+ "preupload_lfs_files",
257
+ "reject_access_request",
258
+ "rename_discussion",
259
+ "repo_exists",
260
+ "repo_info",
261
+ "repo_type_and_id_from_hf_id",
262
+ "request_space_hardware",
263
+ "request_space_storage",
264
+ "restart_space",
265
+ "resume_inference_endpoint",
266
+ "resume_scheduled_job",
267
+ "revision_exists",
268
+ "run_as_future",
269
+ "run_job",
270
+ "run_uv_job",
271
+ "scale_to_zero_inference_endpoint",
272
+ "set_space_sleep_time",
273
+ "space_info",
274
+ "super_squash_history",
275
+ "suspend_scheduled_job",
276
+ "unlike",
277
+ "update_collection_item",
278
+ "update_collection_metadata",
279
+ "update_inference_endpoint",
280
+ "update_repo_settings",
281
+ "update_repo_visibility",
282
+ "update_webhook",
283
+ "upload_file",
284
+ "upload_folder",
285
+ "upload_large_folder",
286
+ "whoami",
287
+ ],
288
+ "hf_file_system": [
289
+ "HfFileSystem",
290
+ "HfFileSystemFile",
291
+ "HfFileSystemResolvedPath",
292
+ "HfFileSystemStreamFile",
293
+ ],
294
+ "hub_mixin": [
295
+ "ModelHubMixin",
296
+ "PyTorchModelHubMixin",
297
+ ],
298
+ "inference._client": [
299
+ "InferenceClient",
300
+ "InferenceTimeoutError",
301
+ ],
302
+ "inference._generated._async_client": [
303
+ "AsyncInferenceClient",
304
+ ],
305
+ "inference._generated.types": [
306
+ "AudioClassificationInput",
307
+ "AudioClassificationOutputElement",
308
+ "AudioClassificationOutputTransform",
309
+ "AudioClassificationParameters",
310
+ "AudioToAudioInput",
311
+ "AudioToAudioOutputElement",
312
+ "AutomaticSpeechRecognitionEarlyStoppingEnum",
313
+ "AutomaticSpeechRecognitionGenerationParameters",
314
+ "AutomaticSpeechRecognitionInput",
315
+ "AutomaticSpeechRecognitionOutput",
316
+ "AutomaticSpeechRecognitionOutputChunk",
317
+ "AutomaticSpeechRecognitionParameters",
318
+ "ChatCompletionInput",
319
+ "ChatCompletionInputFunctionDefinition",
320
+ "ChatCompletionInputFunctionName",
321
+ "ChatCompletionInputGrammarType",
322
+ "ChatCompletionInputJSONSchema",
323
+ "ChatCompletionInputMessage",
324
+ "ChatCompletionInputMessageChunk",
325
+ "ChatCompletionInputMessageChunkType",
326
+ "ChatCompletionInputResponseFormatJSONObject",
327
+ "ChatCompletionInputResponseFormatJSONSchema",
328
+ "ChatCompletionInputResponseFormatText",
329
+ "ChatCompletionInputStreamOptions",
330
+ "ChatCompletionInputTool",
331
+ "ChatCompletionInputToolCall",
332
+ "ChatCompletionInputToolChoiceClass",
333
+ "ChatCompletionInputToolChoiceEnum",
334
+ "ChatCompletionInputURL",
335
+ "ChatCompletionOutput",
336
+ "ChatCompletionOutputComplete",
337
+ "ChatCompletionOutputFunctionDefinition",
338
+ "ChatCompletionOutputLogprob",
339
+ "ChatCompletionOutputLogprobs",
340
+ "ChatCompletionOutputMessage",
341
+ "ChatCompletionOutputToolCall",
342
+ "ChatCompletionOutputTopLogprob",
343
+ "ChatCompletionOutputUsage",
344
+ "ChatCompletionStreamOutput",
345
+ "ChatCompletionStreamOutputChoice",
346
+ "ChatCompletionStreamOutputDelta",
347
+ "ChatCompletionStreamOutputDeltaToolCall",
348
+ "ChatCompletionStreamOutputFunction",
349
+ "ChatCompletionStreamOutputLogprob",
350
+ "ChatCompletionStreamOutputLogprobs",
351
+ "ChatCompletionStreamOutputTopLogprob",
352
+ "ChatCompletionStreamOutputUsage",
353
+ "DepthEstimationInput",
354
+ "DepthEstimationOutput",
355
+ "DocumentQuestionAnsweringInput",
356
+ "DocumentQuestionAnsweringInputData",
357
+ "DocumentQuestionAnsweringOutputElement",
358
+ "DocumentQuestionAnsweringParameters",
359
+ "FeatureExtractionInput",
360
+ "FeatureExtractionInputTruncationDirection",
361
+ "FillMaskInput",
362
+ "FillMaskOutputElement",
363
+ "FillMaskParameters",
364
+ "ImageClassificationInput",
365
+ "ImageClassificationOutputElement",
366
+ "ImageClassificationOutputTransform",
367
+ "ImageClassificationParameters",
368
+ "ImageSegmentationInput",
369
+ "ImageSegmentationOutputElement",
370
+ "ImageSegmentationParameters",
371
+ "ImageSegmentationSubtask",
372
+ "ImageToImageInput",
373
+ "ImageToImageOutput",
374
+ "ImageToImageParameters",
375
+ "ImageToImageTargetSize",
376
+ "ImageToTextEarlyStoppingEnum",
377
+ "ImageToTextGenerationParameters",
378
+ "ImageToTextInput",
379
+ "ImageToTextOutput",
380
+ "ImageToTextParameters",
381
+ "ImageToVideoInput",
382
+ "ImageToVideoOutput",
383
+ "ImageToVideoParameters",
384
+ "ImageToVideoTargetSize",
385
+ "ObjectDetectionBoundingBox",
386
+ "ObjectDetectionInput",
387
+ "ObjectDetectionOutputElement",
388
+ "ObjectDetectionParameters",
389
+ "Padding",
390
+ "QuestionAnsweringInput",
391
+ "QuestionAnsweringInputData",
392
+ "QuestionAnsweringOutputElement",
393
+ "QuestionAnsweringParameters",
394
+ "SentenceSimilarityInput",
395
+ "SentenceSimilarityInputData",
396
+ "SummarizationInput",
397
+ "SummarizationOutput",
398
+ "SummarizationParameters",
399
+ "SummarizationTruncationStrategy",
400
+ "TableQuestionAnsweringInput",
401
+ "TableQuestionAnsweringInputData",
402
+ "TableQuestionAnsweringOutputElement",
403
+ "TableQuestionAnsweringParameters",
404
+ "Text2TextGenerationInput",
405
+ "Text2TextGenerationOutput",
406
+ "Text2TextGenerationParameters",
407
+ "Text2TextGenerationTruncationStrategy",
408
+ "TextClassificationInput",
409
+ "TextClassificationOutputElement",
410
+ "TextClassificationOutputTransform",
411
+ "TextClassificationParameters",
412
+ "TextGenerationInput",
413
+ "TextGenerationInputGenerateParameters",
414
+ "TextGenerationInputGrammarType",
415
+ "TextGenerationOutput",
416
+ "TextGenerationOutputBestOfSequence",
417
+ "TextGenerationOutputDetails",
418
+ "TextGenerationOutputFinishReason",
419
+ "TextGenerationOutputPrefillToken",
420
+ "TextGenerationOutputToken",
421
+ "TextGenerationStreamOutput",
422
+ "TextGenerationStreamOutputStreamDetails",
423
+ "TextGenerationStreamOutputToken",
424
+ "TextToAudioEarlyStoppingEnum",
425
+ "TextToAudioGenerationParameters",
426
+ "TextToAudioInput",
427
+ "TextToAudioOutput",
428
+ "TextToAudioParameters",
429
+ "TextToImageInput",
430
+ "TextToImageOutput",
431
+ "TextToImageParameters",
432
+ "TextToSpeechEarlyStoppingEnum",
433
+ "TextToSpeechGenerationParameters",
434
+ "TextToSpeechInput",
435
+ "TextToSpeechOutput",
436
+ "TextToSpeechParameters",
437
+ "TextToVideoInput",
438
+ "TextToVideoOutput",
439
+ "TextToVideoParameters",
440
+ "TokenClassificationAggregationStrategy",
441
+ "TokenClassificationInput",
442
+ "TokenClassificationOutputElement",
443
+ "TokenClassificationParameters",
444
+ "TranslationInput",
445
+ "TranslationOutput",
446
+ "TranslationParameters",
447
+ "TranslationTruncationStrategy",
448
+ "TypeEnum",
449
+ "VideoClassificationInput",
450
+ "VideoClassificationOutputElement",
451
+ "VideoClassificationOutputTransform",
452
+ "VideoClassificationParameters",
453
+ "VisualQuestionAnsweringInput",
454
+ "VisualQuestionAnsweringInputData",
455
+ "VisualQuestionAnsweringOutputElement",
456
+ "VisualQuestionAnsweringParameters",
457
+ "ZeroShotClassificationInput",
458
+ "ZeroShotClassificationOutputElement",
459
+ "ZeroShotClassificationParameters",
460
+ "ZeroShotImageClassificationInput",
461
+ "ZeroShotImageClassificationOutputElement",
462
+ "ZeroShotImageClassificationParameters",
463
+ "ZeroShotObjectDetectionBoundingBox",
464
+ "ZeroShotObjectDetectionInput",
465
+ "ZeroShotObjectDetectionOutputElement",
466
+ "ZeroShotObjectDetectionParameters",
467
+ ],
468
+ "inference._mcp.agent": [
469
+ "Agent",
470
+ ],
471
+ "inference._mcp.mcp_client": [
472
+ "MCPClient",
473
+ ],
474
+ "inference_api": [
475
+ "InferenceApi",
476
+ ],
477
+ "keras_mixin": [
478
+ "KerasModelHubMixin",
479
+ "from_pretrained_keras",
480
+ "push_to_hub_keras",
481
+ "save_pretrained_keras",
482
+ ],
483
+ "repocard": [
484
+ "DatasetCard",
485
+ "ModelCard",
486
+ "RepoCard",
487
+ "SpaceCard",
488
+ "metadata_eval_result",
489
+ "metadata_load",
490
+ "metadata_save",
491
+ "metadata_update",
492
+ ],
493
+ "repocard_data": [
494
+ "CardData",
495
+ "DatasetCardData",
496
+ "EvalResult",
497
+ "ModelCardData",
498
+ "SpaceCardData",
499
+ ],
500
+ "repository": [
501
+ "Repository",
502
+ ],
503
+ "serialization": [
504
+ "StateDictSplit",
505
+ "get_tf_storage_size",
506
+ "get_torch_storage_id",
507
+ "get_torch_storage_size",
508
+ "load_state_dict_from_file",
509
+ "load_torch_model",
510
+ "save_torch_model",
511
+ "save_torch_state_dict",
512
+ "split_state_dict_into_shards_factory",
513
+ "split_tf_state_dict_into_shards",
514
+ "split_torch_state_dict_into_shards",
515
+ ],
516
+ "serialization._dduf": [
517
+ "DDUFEntry",
518
+ "export_entries_as_dduf",
519
+ "export_folder_as_dduf",
520
+ "read_dduf_file",
521
+ ],
522
+ "utils": [
523
+ "CacheNotFound",
524
+ "CachedFileInfo",
525
+ "CachedRepoInfo",
526
+ "CachedRevisionInfo",
527
+ "CorruptedCacheException",
528
+ "DeleteCacheStrategy",
529
+ "HFCacheInfo",
530
+ "HfFolder",
531
+ "cached_assets_path",
532
+ "configure_http_backend",
533
+ "dump_environment_info",
534
+ "get_session",
535
+ "get_token",
536
+ "logging",
537
+ "scan_cache_dir",
538
+ ],
539
+ }
540
+
541
+ # WARNING: __all__ is generated automatically, Any manual edit will be lost when re-generating this file !
542
+ #
543
+ # To update the static imports, please run the following command and commit the changes.
544
+ # ```
545
+ # # Use script
546
+ # python utils/check_all_variable.py --update
547
+ #
548
+ # # Or run style on codebase
549
+ # make style
550
+ # ```
551
+
552
+ __all__ = [
553
+ "Agent",
554
+ "AsyncInferenceClient",
555
+ "AudioClassificationInput",
556
+ "AudioClassificationOutputElement",
557
+ "AudioClassificationOutputTransform",
558
+ "AudioClassificationParameters",
559
+ "AudioToAudioInput",
560
+ "AudioToAudioOutputElement",
561
+ "AutomaticSpeechRecognitionEarlyStoppingEnum",
562
+ "AutomaticSpeechRecognitionGenerationParameters",
563
+ "AutomaticSpeechRecognitionInput",
564
+ "AutomaticSpeechRecognitionOutput",
565
+ "AutomaticSpeechRecognitionOutputChunk",
566
+ "AutomaticSpeechRecognitionParameters",
567
+ "CONFIG_NAME",
568
+ "CacheNotFound",
569
+ "CachedFileInfo",
570
+ "CachedRepoInfo",
571
+ "CachedRevisionInfo",
572
+ "CardData",
573
+ "ChatCompletionInput",
574
+ "ChatCompletionInputFunctionDefinition",
575
+ "ChatCompletionInputFunctionName",
576
+ "ChatCompletionInputGrammarType",
577
+ "ChatCompletionInputJSONSchema",
578
+ "ChatCompletionInputMessage",
579
+ "ChatCompletionInputMessageChunk",
580
+ "ChatCompletionInputMessageChunkType",
581
+ "ChatCompletionInputResponseFormatJSONObject",
582
+ "ChatCompletionInputResponseFormatJSONSchema",
583
+ "ChatCompletionInputResponseFormatText",
584
+ "ChatCompletionInputStreamOptions",
585
+ "ChatCompletionInputTool",
586
+ "ChatCompletionInputToolCall",
587
+ "ChatCompletionInputToolChoiceClass",
588
+ "ChatCompletionInputToolChoiceEnum",
589
+ "ChatCompletionInputURL",
590
+ "ChatCompletionOutput",
591
+ "ChatCompletionOutputComplete",
592
+ "ChatCompletionOutputFunctionDefinition",
593
+ "ChatCompletionOutputLogprob",
594
+ "ChatCompletionOutputLogprobs",
595
+ "ChatCompletionOutputMessage",
596
+ "ChatCompletionOutputToolCall",
597
+ "ChatCompletionOutputTopLogprob",
598
+ "ChatCompletionOutputUsage",
599
+ "ChatCompletionStreamOutput",
600
+ "ChatCompletionStreamOutputChoice",
601
+ "ChatCompletionStreamOutputDelta",
602
+ "ChatCompletionStreamOutputDeltaToolCall",
603
+ "ChatCompletionStreamOutputFunction",
604
+ "ChatCompletionStreamOutputLogprob",
605
+ "ChatCompletionStreamOutputLogprobs",
606
+ "ChatCompletionStreamOutputTopLogprob",
607
+ "ChatCompletionStreamOutputUsage",
608
+ "Collection",
609
+ "CollectionItem",
610
+ "CommitInfo",
611
+ "CommitOperation",
612
+ "CommitOperationAdd",
613
+ "CommitOperationCopy",
614
+ "CommitOperationDelete",
615
+ "CommitScheduler",
616
+ "CorruptedCacheException",
617
+ "DDUFEntry",
618
+ "DatasetCard",
619
+ "DatasetCardData",
620
+ "DatasetInfo",
621
+ "DeleteCacheStrategy",
622
+ "DepthEstimationInput",
623
+ "DepthEstimationOutput",
624
+ "Discussion",
625
+ "DiscussionComment",
626
+ "DiscussionCommit",
627
+ "DiscussionEvent",
628
+ "DiscussionStatusChange",
629
+ "DiscussionTitleChange",
630
+ "DiscussionWithDetails",
631
+ "DocumentQuestionAnsweringInput",
632
+ "DocumentQuestionAnsweringInputData",
633
+ "DocumentQuestionAnsweringOutputElement",
634
+ "DocumentQuestionAnsweringParameters",
635
+ "EvalResult",
636
+ "FLAX_WEIGHTS_NAME",
637
+ "FeatureExtractionInput",
638
+ "FeatureExtractionInputTruncationDirection",
639
+ "FillMaskInput",
640
+ "FillMaskOutputElement",
641
+ "FillMaskParameters",
642
+ "GitCommitInfo",
643
+ "GitRefInfo",
644
+ "GitRefs",
645
+ "HFCacheInfo",
646
+ "HFSummaryWriter",
647
+ "HUGGINGFACE_CO_URL_HOME",
648
+ "HUGGINGFACE_CO_URL_TEMPLATE",
649
+ "HfApi",
650
+ "HfFileMetadata",
651
+ "HfFileSystem",
652
+ "HfFileSystemFile",
653
+ "HfFileSystemResolvedPath",
654
+ "HfFileSystemStreamFile",
655
+ "HfFolder",
656
+ "ImageClassificationInput",
657
+ "ImageClassificationOutputElement",
658
+ "ImageClassificationOutputTransform",
659
+ "ImageClassificationParameters",
660
+ "ImageSegmentationInput",
661
+ "ImageSegmentationOutputElement",
662
+ "ImageSegmentationParameters",
663
+ "ImageSegmentationSubtask",
664
+ "ImageToImageInput",
665
+ "ImageToImageOutput",
666
+ "ImageToImageParameters",
667
+ "ImageToImageTargetSize",
668
+ "ImageToTextEarlyStoppingEnum",
669
+ "ImageToTextGenerationParameters",
670
+ "ImageToTextInput",
671
+ "ImageToTextOutput",
672
+ "ImageToTextParameters",
673
+ "ImageToVideoInput",
674
+ "ImageToVideoOutput",
675
+ "ImageToVideoParameters",
676
+ "ImageToVideoTargetSize",
677
+ "InferenceApi",
678
+ "InferenceClient",
679
+ "InferenceEndpoint",
680
+ "InferenceEndpointError",
681
+ "InferenceEndpointStatus",
682
+ "InferenceEndpointTimeoutError",
683
+ "InferenceEndpointType",
684
+ "InferenceTimeoutError",
685
+ "JobInfo",
686
+ "JobOwner",
687
+ "JobStage",
688
+ "JobStatus",
689
+ "KerasModelHubMixin",
690
+ "MCPClient",
691
+ "ModelCard",
692
+ "ModelCardData",
693
+ "ModelHubMixin",
694
+ "ModelInfo",
695
+ "OAuthInfo",
696
+ "OAuthOrgInfo",
697
+ "OAuthUserInfo",
698
+ "ObjectDetectionBoundingBox",
699
+ "ObjectDetectionInput",
700
+ "ObjectDetectionOutputElement",
701
+ "ObjectDetectionParameters",
702
+ "PYTORCH_WEIGHTS_NAME",
703
+ "Padding",
704
+ "PyTorchModelHubMixin",
705
+ "QuestionAnsweringInput",
706
+ "QuestionAnsweringInputData",
707
+ "QuestionAnsweringOutputElement",
708
+ "QuestionAnsweringParameters",
709
+ "REPO_TYPE_DATASET",
710
+ "REPO_TYPE_MODEL",
711
+ "REPO_TYPE_SPACE",
712
+ "RepoCard",
713
+ "RepoUrl",
714
+ "Repository",
715
+ "SentenceSimilarityInput",
716
+ "SentenceSimilarityInputData",
717
+ "SpaceCard",
718
+ "SpaceCardData",
719
+ "SpaceHardware",
720
+ "SpaceInfo",
721
+ "SpaceRuntime",
722
+ "SpaceStage",
723
+ "SpaceStorage",
724
+ "SpaceVariable",
725
+ "StateDictSplit",
726
+ "SummarizationInput",
727
+ "SummarizationOutput",
728
+ "SummarizationParameters",
729
+ "SummarizationTruncationStrategy",
730
+ "TF2_WEIGHTS_NAME",
731
+ "TF_WEIGHTS_NAME",
732
+ "TableQuestionAnsweringInput",
733
+ "TableQuestionAnsweringInputData",
734
+ "TableQuestionAnsweringOutputElement",
735
+ "TableQuestionAnsweringParameters",
736
+ "Text2TextGenerationInput",
737
+ "Text2TextGenerationOutput",
738
+ "Text2TextGenerationParameters",
739
+ "Text2TextGenerationTruncationStrategy",
740
+ "TextClassificationInput",
741
+ "TextClassificationOutputElement",
742
+ "TextClassificationOutputTransform",
743
+ "TextClassificationParameters",
744
+ "TextGenerationInput",
745
+ "TextGenerationInputGenerateParameters",
746
+ "TextGenerationInputGrammarType",
747
+ "TextGenerationOutput",
748
+ "TextGenerationOutputBestOfSequence",
749
+ "TextGenerationOutputDetails",
750
+ "TextGenerationOutputFinishReason",
751
+ "TextGenerationOutputPrefillToken",
752
+ "TextGenerationOutputToken",
753
+ "TextGenerationStreamOutput",
754
+ "TextGenerationStreamOutputStreamDetails",
755
+ "TextGenerationStreamOutputToken",
756
+ "TextToAudioEarlyStoppingEnum",
757
+ "TextToAudioGenerationParameters",
758
+ "TextToAudioInput",
759
+ "TextToAudioOutput",
760
+ "TextToAudioParameters",
761
+ "TextToImageInput",
762
+ "TextToImageOutput",
763
+ "TextToImageParameters",
764
+ "TextToSpeechEarlyStoppingEnum",
765
+ "TextToSpeechGenerationParameters",
766
+ "TextToSpeechInput",
767
+ "TextToSpeechOutput",
768
+ "TextToSpeechParameters",
769
+ "TextToVideoInput",
770
+ "TextToVideoOutput",
771
+ "TextToVideoParameters",
772
+ "TokenClassificationAggregationStrategy",
773
+ "TokenClassificationInput",
774
+ "TokenClassificationOutputElement",
775
+ "TokenClassificationParameters",
776
+ "TranslationInput",
777
+ "TranslationOutput",
778
+ "TranslationParameters",
779
+ "TranslationTruncationStrategy",
780
+ "TypeEnum",
781
+ "User",
782
+ "UserLikes",
783
+ "VideoClassificationInput",
784
+ "VideoClassificationOutputElement",
785
+ "VideoClassificationOutputTransform",
786
+ "VideoClassificationParameters",
787
+ "VisualQuestionAnsweringInput",
788
+ "VisualQuestionAnsweringInputData",
789
+ "VisualQuestionAnsweringOutputElement",
790
+ "VisualQuestionAnsweringParameters",
791
+ "WebhookInfo",
792
+ "WebhookPayload",
793
+ "WebhookPayloadComment",
794
+ "WebhookPayloadDiscussion",
795
+ "WebhookPayloadDiscussionChanges",
796
+ "WebhookPayloadEvent",
797
+ "WebhookPayloadMovedTo",
798
+ "WebhookPayloadRepo",
799
+ "WebhookPayloadUrl",
800
+ "WebhookPayloadWebhook",
801
+ "WebhookWatchedItem",
802
+ "WebhooksServer",
803
+ "ZeroShotClassificationInput",
804
+ "ZeroShotClassificationOutputElement",
805
+ "ZeroShotClassificationParameters",
806
+ "ZeroShotImageClassificationInput",
807
+ "ZeroShotImageClassificationOutputElement",
808
+ "ZeroShotImageClassificationParameters",
809
+ "ZeroShotObjectDetectionBoundingBox",
810
+ "ZeroShotObjectDetectionInput",
811
+ "ZeroShotObjectDetectionOutputElement",
812
+ "ZeroShotObjectDetectionParameters",
813
+ "_CACHED_NO_EXIST",
814
+ "_save_pretrained_fastai",
815
+ "accept_access_request",
816
+ "add_collection_item",
817
+ "add_space_secret",
818
+ "add_space_variable",
819
+ "attach_huggingface_oauth",
820
+ "auth_check",
821
+ "auth_list",
822
+ "auth_switch",
823
+ "cached_assets_path",
824
+ "cancel_access_request",
825
+ "cancel_job",
826
+ "change_discussion_status",
827
+ "comment_discussion",
828
+ "configure_http_backend",
829
+ "create_branch",
830
+ "create_collection",
831
+ "create_commit",
832
+ "create_discussion",
833
+ "create_inference_endpoint",
834
+ "create_inference_endpoint_from_catalog",
835
+ "create_pull_request",
836
+ "create_repo",
837
+ "create_scheduled_job",
838
+ "create_scheduled_uv_job",
839
+ "create_tag",
840
+ "create_webhook",
841
+ "dataset_info",
842
+ "delete_branch",
843
+ "delete_collection",
844
+ "delete_collection_item",
845
+ "delete_file",
846
+ "delete_folder",
847
+ "delete_inference_endpoint",
848
+ "delete_repo",
849
+ "delete_scheduled_job",
850
+ "delete_space_secret",
851
+ "delete_space_storage",
852
+ "delete_space_variable",
853
+ "delete_tag",
854
+ "delete_webhook",
855
+ "disable_webhook",
856
+ "dump_environment_info",
857
+ "duplicate_space",
858
+ "edit_discussion_comment",
859
+ "enable_webhook",
860
+ "export_entries_as_dduf",
861
+ "export_folder_as_dduf",
862
+ "fetch_job_logs",
863
+ "file_exists",
864
+ "from_pretrained_fastai",
865
+ "from_pretrained_keras",
866
+ "get_collection",
867
+ "get_dataset_tags",
868
+ "get_discussion_details",
869
+ "get_full_repo_name",
870
+ "get_hf_file_metadata",
871
+ "get_inference_endpoint",
872
+ "get_model_tags",
873
+ "get_paths_info",
874
+ "get_repo_discussions",
875
+ "get_safetensors_metadata",
876
+ "get_session",
877
+ "get_space_runtime",
878
+ "get_space_variables",
879
+ "get_tf_storage_size",
880
+ "get_token",
881
+ "get_token_permission",
882
+ "get_torch_storage_id",
883
+ "get_torch_storage_size",
884
+ "get_user_overview",
885
+ "get_webhook",
886
+ "grant_access",
887
+ "hf_hub_download",
888
+ "hf_hub_url",
889
+ "inspect_job",
890
+ "inspect_scheduled_job",
891
+ "interpreter_login",
892
+ "list_accepted_access_requests",
893
+ "list_collections",
894
+ "list_datasets",
895
+ "list_inference_catalog",
896
+ "list_inference_endpoints",
897
+ "list_jobs",
898
+ "list_lfs_files",
899
+ "list_liked_repos",
900
+ "list_models",
901
+ "list_organization_members",
902
+ "list_papers",
903
+ "list_pending_access_requests",
904
+ "list_rejected_access_requests",
905
+ "list_repo_commits",
906
+ "list_repo_files",
907
+ "list_repo_likers",
908
+ "list_repo_refs",
909
+ "list_repo_tree",
910
+ "list_spaces",
911
+ "list_user_followers",
912
+ "list_user_following",
913
+ "list_webhooks",
914
+ "load_state_dict_from_file",
915
+ "load_torch_model",
916
+ "logging",
917
+ "login",
918
+ "logout",
919
+ "merge_pull_request",
920
+ "metadata_eval_result",
921
+ "metadata_load",
922
+ "metadata_save",
923
+ "metadata_update",
924
+ "model_info",
925
+ "move_repo",
926
+ "notebook_login",
927
+ "paper_info",
928
+ "parse_huggingface_oauth",
929
+ "parse_safetensors_file_metadata",
930
+ "pause_inference_endpoint",
931
+ "pause_space",
932
+ "permanently_delete_lfs_files",
933
+ "preupload_lfs_files",
934
+ "push_to_hub_fastai",
935
+ "push_to_hub_keras",
936
+ "read_dduf_file",
937
+ "reject_access_request",
938
+ "rename_discussion",
939
+ "repo_exists",
940
+ "repo_info",
941
+ "repo_type_and_id_from_hf_id",
942
+ "request_space_hardware",
943
+ "request_space_storage",
944
+ "restart_space",
945
+ "resume_inference_endpoint",
946
+ "resume_scheduled_job",
947
+ "revision_exists",
948
+ "run_as_future",
949
+ "run_job",
950
+ "run_uv_job",
951
+ "save_pretrained_keras",
952
+ "save_torch_model",
953
+ "save_torch_state_dict",
954
+ "scale_to_zero_inference_endpoint",
955
+ "scan_cache_dir",
956
+ "set_space_sleep_time",
957
+ "snapshot_download",
958
+ "space_info",
959
+ "split_state_dict_into_shards_factory",
960
+ "split_tf_state_dict_into_shards",
961
+ "split_torch_state_dict_into_shards",
962
+ "super_squash_history",
963
+ "suspend_scheduled_job",
964
+ "try_to_load_from_cache",
965
+ "unlike",
966
+ "update_collection_item",
967
+ "update_collection_metadata",
968
+ "update_inference_endpoint",
969
+ "update_repo_settings",
970
+ "update_repo_visibility",
971
+ "update_webhook",
972
+ "upload_file",
973
+ "upload_folder",
974
+ "upload_large_folder",
975
+ "webhook_endpoint",
976
+ "whoami",
977
+ ]
978
+
979
+
980
+ def _attach(package_name, submodules=None, submod_attrs=None):
981
+ """Attach lazily loaded submodules, functions, or other attributes.
982
+
983
+ Typically, modules import submodules and attributes as follows:
984
+
985
+ ```py
986
+ import mysubmodule
987
+ import anothersubmodule
988
+
989
+ from .foo import someattr
990
+ ```
991
+
992
+ The idea is to replace a package's `__getattr__`, `__dir__`, such that all imports
993
+ work exactly the way they would with normal imports, except that the import occurs
994
+ upon first use.
995
+
996
+ The typical way to call this function, replacing the above imports, is:
997
+
998
+ ```python
999
+ __getattr__, __dir__ = _attach(
1000
+ __name__,
1001
+ ['mysubmodule', 'anothersubmodule'],
1002
+ {'foo': ['someattr']}
1003
+ )
1004
+ ```
1005
+ This functionality requires Python 3.7 or higher.
1006
+
1007
+ Args:
1008
+ package_name (`str`):
1009
+ Typically use `__name__`.
1010
+ submodules (`set`):
1011
+ List of submodules to attach.
1012
+ submod_attrs (`dict`):
1013
+ Dictionary of submodule -> list of attributes / functions.
1014
+ These attributes are imported as they are used.
1015
+
1016
+ Returns:
1017
+ __getattr__, __dir__
1018
+
1019
+ """
1020
+ if submod_attrs is None:
1021
+ submod_attrs = {}
1022
+
1023
+ if submodules is None:
1024
+ submodules = set()
1025
+ else:
1026
+ submodules = set(submodules)
1027
+
1028
+ attr_to_modules = {attr: mod for mod, attrs in submod_attrs.items() for attr in attrs}
1029
+
1030
+ def __getattr__(name):
1031
+ if name in submodules:
1032
+ try:
1033
+ return importlib.import_module(f"{package_name}.{name}")
1034
+ except Exception as e:
1035
+ print(f"Error importing {package_name}.{name}: {e}")
1036
+ raise
1037
+ elif name in attr_to_modules:
1038
+ submod_path = f"{package_name}.{attr_to_modules[name]}"
1039
+ try:
1040
+ submod = importlib.import_module(submod_path)
1041
+ except Exception as e:
1042
+ print(f"Error importing {submod_path}: {e}")
1043
+ raise
1044
+ attr = getattr(submod, name)
1045
+
1046
+ # If the attribute lives in a file (module) with the same
1047
+ # name as the attribute, ensure that the attribute and *not*
1048
+ # the module is accessible on the package.
1049
+ if name == attr_to_modules[name]:
1050
+ pkg = sys.modules[package_name]
1051
+ pkg.__dict__[name] = attr
1052
+
1053
+ return attr
1054
+ else:
1055
+ raise AttributeError(f"No {package_name} attribute {name}")
1056
+
1057
+ def __dir__():
1058
+ return __all__
1059
+
1060
+ return __getattr__, __dir__
1061
+
1062
+
1063
+ __getattr__, __dir__ = _attach(__name__, submodules=[], submod_attrs=_SUBMOD_ATTRS)
1064
+
1065
+ if os.environ.get("EAGER_IMPORT", ""):
1066
+ for attr in __all__:
1067
+ __getattr__(attr)
1068
+
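For context on the lazy-import machinery wired up just above: `_attach` installs a module-level `__getattr__`/`__dir__` (PEP 562) so that each name listed in `_SUBMOD_ATTRS` is imported from its submodule only on first attribute access, while the `EAGER_IMPORT` environment variable forces every attribute to resolve immediately. A minimal sketch of the observable behavior, assuming `huggingface_hub` is installed in the current environment:

```python
import sys

import huggingface_hub  # importing the package does not pull in the heavy submodules yet

print("hf_api loaded?", "huggingface_hub.hf_api" in sys.modules)  # typically False at this point
_ = huggingface_hub.whoami  # first access calls importlib.import_module("huggingface_hub.hf_api")
print("hf_api loaded?", "huggingface_hub.hf_api" in sys.modules)  # True
```

Setting `EAGER_IMPORT=1` before the first import walks `__all__` and resolves everything up front, trading startup time for import errors that surface immediately.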
1069
+ # WARNING: any content below this statement is generated automatically. Any manual edit
1070
+ # will be lost when re-generating this file !
1071
+ #
1072
+ # To update the static imports, please run the following command and commit the changes.
1073
+ # ```
1074
+ # # Use script
1075
+ # python utils/check_static_imports.py --update
1076
+ #
1077
+ # # Or run style on codebase
1078
+ # make style
1079
+ # ```
1080
+ if TYPE_CHECKING: # pragma: no cover
1081
+ from ._commit_scheduler import CommitScheduler # noqa: F401
1082
+ from ._inference_endpoints import (
1083
+ InferenceEndpoint, # noqa: F401
1084
+ InferenceEndpointError, # noqa: F401
1085
+ InferenceEndpointStatus, # noqa: F401
1086
+ InferenceEndpointTimeoutError, # noqa: F401
1087
+ InferenceEndpointType, # noqa: F401
1088
+ )
1089
+ from ._jobs_api import (
1090
+ JobInfo, # noqa: F401
1091
+ JobOwner, # noqa: F401
1092
+ JobStage, # noqa: F401
1093
+ JobStatus, # noqa: F401
1094
+ )
1095
+ from ._login import (
1096
+ auth_list, # noqa: F401
1097
+ auth_switch, # noqa: F401
1098
+ interpreter_login, # noqa: F401
1099
+ login, # noqa: F401
1100
+ logout, # noqa: F401
1101
+ notebook_login, # noqa: F401
1102
+ )
1103
+ from ._oauth import (
1104
+ OAuthInfo, # noqa: F401
1105
+ OAuthOrgInfo, # noqa: F401
1106
+ OAuthUserInfo, # noqa: F401
1107
+ attach_huggingface_oauth, # noqa: F401
1108
+ parse_huggingface_oauth, # noqa: F401
1109
+ )
1110
+ from ._snapshot_download import snapshot_download # noqa: F401
1111
+ from ._space_api import (
1112
+ SpaceHardware, # noqa: F401
1113
+ SpaceRuntime, # noqa: F401
1114
+ SpaceStage, # noqa: F401
1115
+ SpaceStorage, # noqa: F401
1116
+ SpaceVariable, # noqa: F401
1117
+ )
1118
+ from ._tensorboard_logger import HFSummaryWriter # noqa: F401
1119
+ from ._webhooks_payload import (
1120
+ WebhookPayload, # noqa: F401
1121
+ WebhookPayloadComment, # noqa: F401
1122
+ WebhookPayloadDiscussion, # noqa: F401
1123
+ WebhookPayloadDiscussionChanges, # noqa: F401
1124
+ WebhookPayloadEvent, # noqa: F401
1125
+ WebhookPayloadMovedTo, # noqa: F401
1126
+ WebhookPayloadRepo, # noqa: F401
1127
+ WebhookPayloadUrl, # noqa: F401
1128
+ WebhookPayloadWebhook, # noqa: F401
1129
+ )
1130
+ from ._webhooks_server import (
1131
+ WebhooksServer, # noqa: F401
1132
+ webhook_endpoint, # noqa: F401
1133
+ )
1134
+ from .community import (
1135
+ Discussion, # noqa: F401
1136
+ DiscussionComment, # noqa: F401
1137
+ DiscussionCommit, # noqa: F401
1138
+ DiscussionEvent, # noqa: F401
1139
+ DiscussionStatusChange, # noqa: F401
1140
+ DiscussionTitleChange, # noqa: F401
1141
+ DiscussionWithDetails, # noqa: F401
1142
+ )
1143
+ from .constants import (
1144
+ CONFIG_NAME, # noqa: F401
1145
+ FLAX_WEIGHTS_NAME, # noqa: F401
1146
+ HUGGINGFACE_CO_URL_HOME, # noqa: F401
1147
+ HUGGINGFACE_CO_URL_TEMPLATE, # noqa: F401
1148
+ PYTORCH_WEIGHTS_NAME, # noqa: F401
1149
+ REPO_TYPE_DATASET, # noqa: F401
1150
+ REPO_TYPE_MODEL, # noqa: F401
1151
+ REPO_TYPE_SPACE, # noqa: F401
1152
+ TF2_WEIGHTS_NAME, # noqa: F401
1153
+ TF_WEIGHTS_NAME, # noqa: F401
1154
+ )
1155
+ from .fastai_utils import (
1156
+ _save_pretrained_fastai, # noqa: F401
1157
+ from_pretrained_fastai, # noqa: F401
1158
+ push_to_hub_fastai, # noqa: F401
1159
+ )
1160
+ from .file_download import (
1161
+ _CACHED_NO_EXIST, # noqa: F401
1162
+ HfFileMetadata, # noqa: F401
1163
+ get_hf_file_metadata, # noqa: F401
1164
+ hf_hub_download, # noqa: F401
1165
+ hf_hub_url, # noqa: F401
1166
+ try_to_load_from_cache, # noqa: F401
1167
+ )
1168
+ from .hf_api import (
1169
+ Collection, # noqa: F401
1170
+ CollectionItem, # noqa: F401
1171
+ CommitInfo, # noqa: F401
1172
+ CommitOperation, # noqa: F401
1173
+ CommitOperationAdd, # noqa: F401
1174
+ CommitOperationCopy, # noqa: F401
1175
+ CommitOperationDelete, # noqa: F401
1176
+ DatasetInfo, # noqa: F401
1177
+ GitCommitInfo, # noqa: F401
1178
+ GitRefInfo, # noqa: F401
1179
+ GitRefs, # noqa: F401
1180
+ HfApi, # noqa: F401
1181
+ ModelInfo, # noqa: F401
1182
+ RepoUrl, # noqa: F401
1183
+ SpaceInfo, # noqa: F401
1184
+ User, # noqa: F401
1185
+ UserLikes, # noqa: F401
1186
+ WebhookInfo, # noqa: F401
1187
+ WebhookWatchedItem, # noqa: F401
1188
+ accept_access_request, # noqa: F401
1189
+ add_collection_item, # noqa: F401
1190
+ add_space_secret, # noqa: F401
1191
+ add_space_variable, # noqa: F401
1192
+ auth_check, # noqa: F401
1193
+ cancel_access_request, # noqa: F401
1194
+ cancel_job, # noqa: F401
1195
+ change_discussion_status, # noqa: F401
1196
+ comment_discussion, # noqa: F401
1197
+ create_branch, # noqa: F401
1198
+ create_collection, # noqa: F401
1199
+ create_commit, # noqa: F401
1200
+ create_discussion, # noqa: F401
1201
+ create_inference_endpoint, # noqa: F401
1202
+ create_inference_endpoint_from_catalog, # noqa: F401
1203
+ create_pull_request, # noqa: F401
1204
+ create_repo, # noqa: F401
1205
+ create_scheduled_job, # noqa: F401
1206
+ create_scheduled_uv_job, # noqa: F401
1207
+ create_tag, # noqa: F401
1208
+ create_webhook, # noqa: F401
1209
+ dataset_info, # noqa: F401
1210
+ delete_branch, # noqa: F401
1211
+ delete_collection, # noqa: F401
1212
+ delete_collection_item, # noqa: F401
1213
+ delete_file, # noqa: F401
1214
+ delete_folder, # noqa: F401
1215
+ delete_inference_endpoint, # noqa: F401
1216
+ delete_repo, # noqa: F401
1217
+ delete_scheduled_job, # noqa: F401
1218
+ delete_space_secret, # noqa: F401
1219
+ delete_space_storage, # noqa: F401
1220
+ delete_space_variable, # noqa: F401
1221
+ delete_tag, # noqa: F401
1222
+ delete_webhook, # noqa: F401
1223
+ disable_webhook, # noqa: F401
1224
+ duplicate_space, # noqa: F401
1225
+ edit_discussion_comment, # noqa: F401
1226
+ enable_webhook, # noqa: F401
1227
+ fetch_job_logs, # noqa: F401
1228
+ file_exists, # noqa: F401
1229
+ get_collection, # noqa: F401
1230
+ get_dataset_tags, # noqa: F401
1231
+ get_discussion_details, # noqa: F401
1232
+ get_full_repo_name, # noqa: F401
1233
+ get_inference_endpoint, # noqa: F401
1234
+ get_model_tags, # noqa: F401
1235
+ get_paths_info, # noqa: F401
1236
+ get_repo_discussions, # noqa: F401
1237
+ get_safetensors_metadata, # noqa: F401
1238
+ get_space_runtime, # noqa: F401
1239
+ get_space_variables, # noqa: F401
1240
+ get_token_permission, # noqa: F401
1241
+ get_user_overview, # noqa: F401
1242
+ get_webhook, # noqa: F401
1243
+ grant_access, # noqa: F401
1244
+ inspect_job, # noqa: F401
1245
+ inspect_scheduled_job, # noqa: F401
1246
+ list_accepted_access_requests, # noqa: F401
1247
+ list_collections, # noqa: F401
1248
+ list_datasets, # noqa: F401
1249
+ list_inference_catalog, # noqa: F401
1250
+ list_inference_endpoints, # noqa: F401
1251
+ list_jobs, # noqa: F401
1252
+ list_lfs_files, # noqa: F401
1253
+ list_liked_repos, # noqa: F401
1254
+ list_models, # noqa: F401
1255
+ list_organization_members, # noqa: F401
1256
+ list_papers, # noqa: F401
1257
+ list_pending_access_requests, # noqa: F401
1258
+ list_rejected_access_requests, # noqa: F401
1259
+ list_repo_commits, # noqa: F401
1260
+ list_repo_files, # noqa: F401
1261
+ list_repo_likers, # noqa: F401
1262
+ list_repo_refs, # noqa: F401
1263
+ list_repo_tree, # noqa: F401
1264
+ list_spaces, # noqa: F401
1265
+ list_user_followers, # noqa: F401
1266
+ list_user_following, # noqa: F401
1267
+ list_webhooks, # noqa: F401
1268
+ merge_pull_request, # noqa: F401
1269
+ model_info, # noqa: F401
1270
+ move_repo, # noqa: F401
1271
+ paper_info, # noqa: F401
1272
+ parse_safetensors_file_metadata, # noqa: F401
1273
+ pause_inference_endpoint, # noqa: F401
1274
+ pause_space, # noqa: F401
1275
+ permanently_delete_lfs_files, # noqa: F401
1276
+ preupload_lfs_files, # noqa: F401
1277
+ reject_access_request, # noqa: F401
1278
+ rename_discussion, # noqa: F401
1279
+ repo_exists, # noqa: F401
1280
+ repo_info, # noqa: F401
1281
+ repo_type_and_id_from_hf_id, # noqa: F401
1282
+ request_space_hardware, # noqa: F401
1283
+ request_space_storage, # noqa: F401
1284
+ restart_space, # noqa: F401
1285
+ resume_inference_endpoint, # noqa: F401
1286
+ resume_scheduled_job, # noqa: F401
1287
+ revision_exists, # noqa: F401
1288
+ run_as_future, # noqa: F401
1289
+ run_job, # noqa: F401
1290
+ run_uv_job, # noqa: F401
1291
+ scale_to_zero_inference_endpoint, # noqa: F401
1292
+ set_space_sleep_time, # noqa: F401
1293
+ space_info, # noqa: F401
1294
+ super_squash_history, # noqa: F401
1295
+ suspend_scheduled_job, # noqa: F401
1296
+ unlike, # noqa: F401
1297
+ update_collection_item, # noqa: F401
1298
+ update_collection_metadata, # noqa: F401
1299
+ update_inference_endpoint, # noqa: F401
1300
+ update_repo_settings, # noqa: F401
1301
+ update_repo_visibility, # noqa: F401
1302
+ update_webhook, # noqa: F401
1303
+ upload_file, # noqa: F401
1304
+ upload_folder, # noqa: F401
1305
+ upload_large_folder, # noqa: F401
1306
+ whoami, # noqa: F401
1307
+ )
1308
+ from .hf_file_system import (
1309
+ HfFileSystem, # noqa: F401
1310
+ HfFileSystemFile, # noqa: F401
1311
+ HfFileSystemResolvedPath, # noqa: F401
1312
+ HfFileSystemStreamFile, # noqa: F401
1313
+ )
1314
+ from .hub_mixin import (
1315
+ ModelHubMixin, # noqa: F401
1316
+ PyTorchModelHubMixin, # noqa: F401
1317
+ )
1318
+ from .inference._client import (
1319
+ InferenceClient, # noqa: F401
1320
+ InferenceTimeoutError, # noqa: F401
1321
+ )
1322
+ from .inference._generated._async_client import AsyncInferenceClient # noqa: F401
1323
+ from .inference._generated.types import (
1324
+ AudioClassificationInput, # noqa: F401
1325
+ AudioClassificationOutputElement, # noqa: F401
1326
+ AudioClassificationOutputTransform, # noqa: F401
1327
+ AudioClassificationParameters, # noqa: F401
1328
+ AudioToAudioInput, # noqa: F401
1329
+ AudioToAudioOutputElement, # noqa: F401
1330
+ AutomaticSpeechRecognitionEarlyStoppingEnum, # noqa: F401
1331
+ AutomaticSpeechRecognitionGenerationParameters, # noqa: F401
1332
+ AutomaticSpeechRecognitionInput, # noqa: F401
1333
+ AutomaticSpeechRecognitionOutput, # noqa: F401
1334
+ AutomaticSpeechRecognitionOutputChunk, # noqa: F401
1335
+ AutomaticSpeechRecognitionParameters, # noqa: F401
1336
+ ChatCompletionInput, # noqa: F401
1337
+ ChatCompletionInputFunctionDefinition, # noqa: F401
1338
+ ChatCompletionInputFunctionName, # noqa: F401
1339
+ ChatCompletionInputGrammarType, # noqa: F401
1340
+ ChatCompletionInputJSONSchema, # noqa: F401
1341
+ ChatCompletionInputMessage, # noqa: F401
1342
+ ChatCompletionInputMessageChunk, # noqa: F401
1343
+ ChatCompletionInputMessageChunkType, # noqa: F401
1344
+ ChatCompletionInputResponseFormatJSONObject, # noqa: F401
1345
+ ChatCompletionInputResponseFormatJSONSchema, # noqa: F401
1346
+ ChatCompletionInputResponseFormatText, # noqa: F401
1347
+ ChatCompletionInputStreamOptions, # noqa: F401
1348
+ ChatCompletionInputTool, # noqa: F401
1349
+ ChatCompletionInputToolCall, # noqa: F401
1350
+ ChatCompletionInputToolChoiceClass, # noqa: F401
1351
+ ChatCompletionInputToolChoiceEnum, # noqa: F401
1352
+ ChatCompletionInputURL, # noqa: F401
1353
+ ChatCompletionOutput, # noqa: F401
1354
+ ChatCompletionOutputComplete, # noqa: F401
1355
+ ChatCompletionOutputFunctionDefinition, # noqa: F401
1356
+ ChatCompletionOutputLogprob, # noqa: F401
1357
+ ChatCompletionOutputLogprobs, # noqa: F401
1358
+ ChatCompletionOutputMessage, # noqa: F401
1359
+ ChatCompletionOutputToolCall, # noqa: F401
1360
+ ChatCompletionOutputTopLogprob, # noqa: F401
1361
+ ChatCompletionOutputUsage, # noqa: F401
1362
+ ChatCompletionStreamOutput, # noqa: F401
1363
+ ChatCompletionStreamOutputChoice, # noqa: F401
1364
+ ChatCompletionStreamOutputDelta, # noqa: F401
1365
+ ChatCompletionStreamOutputDeltaToolCall, # noqa: F401
1366
+ ChatCompletionStreamOutputFunction, # noqa: F401
1367
+ ChatCompletionStreamOutputLogprob, # noqa: F401
1368
+ ChatCompletionStreamOutputLogprobs, # noqa: F401
1369
+ ChatCompletionStreamOutputTopLogprob, # noqa: F401
1370
+ ChatCompletionStreamOutputUsage, # noqa: F401
1371
+ DepthEstimationInput, # noqa: F401
1372
+ DepthEstimationOutput, # noqa: F401
1373
+ DocumentQuestionAnsweringInput, # noqa: F401
1374
+ DocumentQuestionAnsweringInputData, # noqa: F401
1375
+ DocumentQuestionAnsweringOutputElement, # noqa: F401
1376
+ DocumentQuestionAnsweringParameters, # noqa: F401
1377
+ FeatureExtractionInput, # noqa: F401
1378
+ FeatureExtractionInputTruncationDirection, # noqa: F401
1379
+ FillMaskInput, # noqa: F401
1380
+ FillMaskOutputElement, # noqa: F401
1381
+ FillMaskParameters, # noqa: F401
1382
+ ImageClassificationInput, # noqa: F401
1383
+ ImageClassificationOutputElement, # noqa: F401
1384
+ ImageClassificationOutputTransform, # noqa: F401
1385
+ ImageClassificationParameters, # noqa: F401
1386
+ ImageSegmentationInput, # noqa: F401
1387
+ ImageSegmentationOutputElement, # noqa: F401
1388
+ ImageSegmentationParameters, # noqa: F401
1389
+ ImageSegmentationSubtask, # noqa: F401
1390
+ ImageToImageInput, # noqa: F401
1391
+ ImageToImageOutput, # noqa: F401
1392
+ ImageToImageParameters, # noqa: F401
1393
+ ImageToImageTargetSize, # noqa: F401
1394
+ ImageToTextEarlyStoppingEnum, # noqa: F401
1395
+ ImageToTextGenerationParameters, # noqa: F401
1396
+ ImageToTextInput, # noqa: F401
1397
+ ImageToTextOutput, # noqa: F401
1398
+ ImageToTextParameters, # noqa: F401
1399
+ ImageToVideoInput, # noqa: F401
1400
+ ImageToVideoOutput, # noqa: F401
1401
+ ImageToVideoParameters, # noqa: F401
1402
+ ImageToVideoTargetSize, # noqa: F401
1403
+ ObjectDetectionBoundingBox, # noqa: F401
1404
+ ObjectDetectionInput, # noqa: F401
1405
+ ObjectDetectionOutputElement, # noqa: F401
1406
+ ObjectDetectionParameters, # noqa: F401
1407
+ Padding, # noqa: F401
1408
+ QuestionAnsweringInput, # noqa: F401
1409
+ QuestionAnsweringInputData, # noqa: F401
1410
+ QuestionAnsweringOutputElement, # noqa: F401
1411
+ QuestionAnsweringParameters, # noqa: F401
1412
+ SentenceSimilarityInput, # noqa: F401
1413
+ SentenceSimilarityInputData, # noqa: F401
1414
+ SummarizationInput, # noqa: F401
1415
+ SummarizationOutput, # noqa: F401
1416
+ SummarizationParameters, # noqa: F401
1417
+ SummarizationTruncationStrategy, # noqa: F401
1418
+ TableQuestionAnsweringInput, # noqa: F401
1419
+ TableQuestionAnsweringInputData, # noqa: F401
1420
+ TableQuestionAnsweringOutputElement, # noqa: F401
1421
+ TableQuestionAnsweringParameters, # noqa: F401
1422
+ Text2TextGenerationInput, # noqa: F401
1423
+ Text2TextGenerationOutput, # noqa: F401
1424
+ Text2TextGenerationParameters, # noqa: F401
1425
+ Text2TextGenerationTruncationStrategy, # noqa: F401
1426
+ TextClassificationInput, # noqa: F401
1427
+ TextClassificationOutputElement, # noqa: F401
1428
+ TextClassificationOutputTransform, # noqa: F401
1429
+ TextClassificationParameters, # noqa: F401
1430
+ TextGenerationInput, # noqa: F401
1431
+ TextGenerationInputGenerateParameters, # noqa: F401
1432
+ TextGenerationInputGrammarType, # noqa: F401
1433
+ TextGenerationOutput, # noqa: F401
1434
+ TextGenerationOutputBestOfSequence, # noqa: F401
1435
+ TextGenerationOutputDetails, # noqa: F401
1436
+ TextGenerationOutputFinishReason, # noqa: F401
1437
+ TextGenerationOutputPrefillToken, # noqa: F401
1438
+ TextGenerationOutputToken, # noqa: F401
1439
+ TextGenerationStreamOutput, # noqa: F401
1440
+ TextGenerationStreamOutputStreamDetails, # noqa: F401
1441
+ TextGenerationStreamOutputToken, # noqa: F401
1442
+ TextToAudioEarlyStoppingEnum, # noqa: F401
1443
+ TextToAudioGenerationParameters, # noqa: F401
1444
+ TextToAudioInput, # noqa: F401
1445
+ TextToAudioOutput, # noqa: F401
1446
+ TextToAudioParameters, # noqa: F401
1447
+ TextToImageInput, # noqa: F401
1448
+ TextToImageOutput, # noqa: F401
1449
+ TextToImageParameters, # noqa: F401
1450
+ TextToSpeechEarlyStoppingEnum, # noqa: F401
1451
+ TextToSpeechGenerationParameters, # noqa: F401
1452
+ TextToSpeechInput, # noqa: F401
1453
+ TextToSpeechOutput, # noqa: F401
1454
+ TextToSpeechParameters, # noqa: F401
1455
+ TextToVideoInput, # noqa: F401
1456
+ TextToVideoOutput, # noqa: F401
1457
+ TextToVideoParameters, # noqa: F401
1458
+ TokenClassificationAggregationStrategy, # noqa: F401
1459
+ TokenClassificationInput, # noqa: F401
1460
+ TokenClassificationOutputElement, # noqa: F401
1461
+ TokenClassificationParameters, # noqa: F401
1462
+ TranslationInput, # noqa: F401
1463
+ TranslationOutput, # noqa: F401
1464
+ TranslationParameters, # noqa: F401
1465
+ TranslationTruncationStrategy, # noqa: F401
1466
+ TypeEnum, # noqa: F401
1467
+ VideoClassificationInput, # noqa: F401
1468
+ VideoClassificationOutputElement, # noqa: F401
1469
+ VideoClassificationOutputTransform, # noqa: F401
1470
+ VideoClassificationParameters, # noqa: F401
1471
+ VisualQuestionAnsweringInput, # noqa: F401
1472
+ VisualQuestionAnsweringInputData, # noqa: F401
1473
+ VisualQuestionAnsweringOutputElement, # noqa: F401
1474
+ VisualQuestionAnsweringParameters, # noqa: F401
1475
+ ZeroShotClassificationInput, # noqa: F401
1476
+ ZeroShotClassificationOutputElement, # noqa: F401
1477
+ ZeroShotClassificationParameters, # noqa: F401
1478
+ ZeroShotImageClassificationInput, # noqa: F401
1479
+ ZeroShotImageClassificationOutputElement, # noqa: F401
1480
+ ZeroShotImageClassificationParameters, # noqa: F401
1481
+ ZeroShotObjectDetectionBoundingBox, # noqa: F401
1482
+ ZeroShotObjectDetectionInput, # noqa: F401
1483
+ ZeroShotObjectDetectionOutputElement, # noqa: F401
1484
+ ZeroShotObjectDetectionParameters, # noqa: F401
1485
+ )
1486
+ from .inference._mcp.agent import Agent # noqa: F401
1487
+ from .inference._mcp.mcp_client import MCPClient # noqa: F401
1488
+ from .inference_api import InferenceApi # noqa: F401
1489
+ from .keras_mixin import (
1490
+ KerasModelHubMixin, # noqa: F401
1491
+ from_pretrained_keras, # noqa: F401
1492
+ push_to_hub_keras, # noqa: F401
1493
+ save_pretrained_keras, # noqa: F401
1494
+ )
1495
+ from .repocard import (
1496
+ DatasetCard, # noqa: F401
1497
+ ModelCard, # noqa: F401
1498
+ RepoCard, # noqa: F401
1499
+ SpaceCard, # noqa: F401
1500
+ metadata_eval_result, # noqa: F401
1501
+ metadata_load, # noqa: F401
1502
+ metadata_save, # noqa: F401
1503
+ metadata_update, # noqa: F401
1504
+ )
1505
+ from .repocard_data import (
1506
+ CardData, # noqa: F401
1507
+ DatasetCardData, # noqa: F401
1508
+ EvalResult, # noqa: F401
1509
+ ModelCardData, # noqa: F401
1510
+ SpaceCardData, # noqa: F401
1511
+ )
1512
+ from .repository import Repository # noqa: F401
1513
+ from .serialization import (
1514
+ StateDictSplit, # noqa: F401
1515
+ get_tf_storage_size, # noqa: F401
1516
+ get_torch_storage_id, # noqa: F401
1517
+ get_torch_storage_size, # noqa: F401
1518
+ load_state_dict_from_file, # noqa: F401
1519
+ load_torch_model, # noqa: F401
1520
+ save_torch_model, # noqa: F401
1521
+ save_torch_state_dict, # noqa: F401
1522
+ split_state_dict_into_shards_factory, # noqa: F401
1523
+ split_tf_state_dict_into_shards, # noqa: F401
1524
+ split_torch_state_dict_into_shards, # noqa: F401
1525
+ )
1526
+ from .serialization._dduf import (
1527
+ DDUFEntry, # noqa: F401
1528
+ export_entries_as_dduf, # noqa: F401
1529
+ export_folder_as_dduf, # noqa: F401
1530
+ read_dduf_file, # noqa: F401
1531
+ )
1532
+ from .utils import (
1533
+ CachedFileInfo, # noqa: F401
1534
+ CachedRepoInfo, # noqa: F401
1535
+ CachedRevisionInfo, # noqa: F401
1536
+ CacheNotFound, # noqa: F401
1537
+ CorruptedCacheException, # noqa: F401
1538
+ DeleteCacheStrategy, # noqa: F401
1539
+ HFCacheInfo, # noqa: F401
1540
+ HfFolder, # noqa: F401
1541
+ cached_assets_path, # noqa: F401
1542
+ configure_http_backend, # noqa: F401
1543
+ dump_environment_info, # noqa: F401
1544
+ get_session, # noqa: F401
1545
+ get_token, # noqa: F401
1546
+ logging, # noqa: F401
1547
+ scan_cache_dir, # noqa: F401
1548
+ )
venv/lib/python3.13/site-packages/huggingface_hub/_commit_api.py ADDED
@@ -0,0 +1,908 @@
1
+ """
2
+ Type definitions and utilities for the `create_commit` API
3
+ """
4
+
5
+ import base64
6
+ import io
7
+ import os
8
+ import warnings
9
+ from collections import defaultdict
10
+ from contextlib import contextmanager
11
+ from dataclasses import dataclass, field
12
+ from itertools import groupby
13
+ from pathlib import Path, PurePosixPath
14
+ from typing import TYPE_CHECKING, Any, BinaryIO, Dict, Iterable, Iterator, List, Literal, Optional, Tuple, Union
15
+
16
+ from tqdm.contrib.concurrent import thread_map
17
+
18
+ from . import constants
19
+ from .errors import EntryNotFoundError, HfHubHTTPError, XetAuthorizationError, XetRefreshTokenError
20
+ from .file_download import hf_hub_url
21
+ from .lfs import UploadInfo, lfs_upload, post_lfs_batch_info
22
+ from .utils import (
23
+ FORBIDDEN_FOLDERS,
24
+ XetTokenType,
25
+ are_progress_bars_disabled,
26
+ chunk_iterable,
27
+ fetch_xet_connection_info_from_repo_info,
28
+ get_session,
29
+ hf_raise_for_status,
30
+ logging,
31
+ sha,
32
+ tqdm_stream_file,
33
+ validate_hf_hub_args,
34
+ )
35
+ from .utils import tqdm as hf_tqdm
36
+
37
+
38
+ if TYPE_CHECKING:
39
+ from .hf_api import RepoFile
40
+
41
+
42
+ logger = logging.get_logger(__name__)
43
+
44
+
45
+ UploadMode = Literal["lfs", "regular"]
46
+
47
+ # Max is 1,000 per request on the Hub for HfApi.get_paths_info
48
+ # Otherwise we get:
49
+ # HfHubHTTPError: 413 Client Error: Payload Too Large for url: https://huggingface.co/api/datasets/xxx (Request ID: xxx)\n\ntoo many parameters
50
+ # See https://github.com/huggingface/huggingface_hub/issues/1503
51
+ FETCH_LFS_BATCH_SIZE = 500
52
+
53
+ UPLOAD_BATCH_MAX_NUM_FILES = 256
54
+
55
+
56
+ @dataclass
57
+ class CommitOperationDelete:
58
+ """
59
+ Data structure holding necessary info to delete a file or a folder from a repository
60
+ on the Hub.
61
+
62
+ Args:
63
+ path_in_repo (`str`):
64
+ Relative filepath in the repo, for example: `"checkpoints/1fec34a/weights.bin"`
65
+ for a file or `"checkpoints/1fec34a/"` for a folder.
66
+ is_folder (`bool` or `Literal["auto"]`, *optional*):
67
+ Whether the Delete Operation applies to a folder or not. If "auto", the path
68
+ type (file or folder) is guessed automatically by looking if path ends with
69
+ a "/" (folder) or not (file). To explicitly set the path type, you can set
70
+ `is_folder=True` or `is_folder=False`.
71
+ """
72
+
73
+ path_in_repo: str
74
+ is_folder: Union[bool, Literal["auto"]] = "auto"
75
+
76
+ def __post_init__(self):
77
+ self.path_in_repo = _validate_path_in_repo(self.path_in_repo)
78
+
79
+ if self.is_folder == "auto":
80
+ self.is_folder = self.path_in_repo.endswith("/")
81
+ if not isinstance(self.is_folder, bool):
82
+ raise ValueError(
83
+ f"Wrong value for `is_folder`. Must be one of [`True`, `False`, `'auto'`]. Got '{self.is_folder}'."
84
+ )
85
+
86
+
87
+ @dataclass
88
+ class CommitOperationCopy:
89
+ """
90
+ Data structure holding necessary info to copy a file in a repository on the Hub.
91
+
92
+ Limitations:
93
+ - Only LFS files can be copied. To copy a regular file, you need to download it locally and re-upload it
94
+ - Cross-repository copies are not supported.
95
+
96
+ Note: you can combine a [`CommitOperationCopy`] and a [`CommitOperationDelete`] to rename an LFS file on the Hub.
97
+
98
+ Args:
99
+ src_path_in_repo (`str`):
100
+ Relative filepath in the repo of the file to be copied, e.g. `"checkpoints/1fec34a/weights.bin"`.
101
+ path_in_repo (`str`):
102
+ Relative filepath in the repo where to copy the file, e.g. `"checkpoints/1fec34a/weights_copy.bin"`.
103
+ src_revision (`str`, *optional*):
104
+ The git revision of the file to be copied. Can be any valid git revision.
105
+ Default to the target commit revision.
106
+ """
107
+
108
+ src_path_in_repo: str
109
+ path_in_repo: str
110
+ src_revision: Optional[str] = None
111
+ # set to the OID of the file to be copied if it has already been uploaded
112
+ # useful to determine if a commit will be empty or not.
113
+ _src_oid: Optional[str] = None
114
+ # set to the OID of the file to copy to if it has already been uploaded
115
+ # useful to determine if a commit will be empty or not.
116
+ _dest_oid: Optional[str] = None
117
+
118
+ def __post_init__(self):
119
+ self.src_path_in_repo = _validate_path_in_repo(self.src_path_in_repo)
120
+ self.path_in_repo = _validate_path_in_repo(self.path_in_repo)
121
+
122
+
123
+ @dataclass
124
+ class CommitOperationAdd:
125
+ """
126
+ Data structure holding necessary info to upload a file to a repository on the Hub.
127
+
128
+ Args:
129
+ path_in_repo (`str`):
130
+ Relative filepath in the repo, for example: `"checkpoints/1fec34a/weights.bin"`
131
+ path_or_fileobj (`str`, `Path`, `bytes`, or `BinaryIO`):
132
+ Either:
133
+ - a path to a local file (as `str` or `pathlib.Path`) to upload
134
+ - a buffer of bytes (`bytes`) holding the content of the file to upload
135
+ - a "file object" (subclass of `io.BufferedIOBase`), typically obtained
136
+ with `open(path, "rb")`. It must support `seek()` and `tell()` methods.
137
+
138
+ Raises:
139
+ [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
140
+ If `path_or_fileobj` is not one of `str`, `Path`, `bytes` or `io.BufferedIOBase`.
141
+ [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
142
+ If `path_or_fileobj` is a `str` or `Path` but not a path to an existing file.
143
+ [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
144
+ If `path_or_fileobj` is a `io.BufferedIOBase` but it doesn't support both
145
+ `seek()` and `tell()`.
146
+ """
147
+
148
+ path_in_repo: str
149
+ path_or_fileobj: Union[str, Path, bytes, BinaryIO]
150
+ upload_info: UploadInfo = field(init=False, repr=False)
151
+
152
+ # Internal attributes
153
+
154
+ # set to "lfs" or "regular" once known
155
+ _upload_mode: Optional[UploadMode] = field(init=False, repr=False, default=None)
156
+
157
+ # set to True if .gitignore rules prevent the file from being uploaded as LFS
158
+ # (server-side check)
159
+ _should_ignore: Optional[bool] = field(init=False, repr=False, default=None)
160
+
161
+ # set to the remote OID of the file if it has already been uploaded
162
+ # useful to determine if a commit will be empty or not
163
+ _remote_oid: Optional[str] = field(init=False, repr=False, default=None)
164
+
165
+ # set to True once the file has been uploaded as LFS
166
+ _is_uploaded: bool = field(init=False, repr=False, default=False)
167
+
168
+ # set to True once the file has been committed
169
+ _is_committed: bool = field(init=False, repr=False, default=False)
170
+
171
+ def __post_init__(self) -> None:
172
+ """Validates `path_or_fileobj` and compute `upload_info`."""
173
+ self.path_in_repo = _validate_path_in_repo(self.path_in_repo)
174
+
175
+ # Validate `path_or_fileobj` value
176
+ if isinstance(self.path_or_fileobj, Path):
177
+ self.path_or_fileobj = str(self.path_or_fileobj)
178
+ if isinstance(self.path_or_fileobj, str):
179
+ path_or_fileobj = os.path.normpath(os.path.expanduser(self.path_or_fileobj))
180
+ if not os.path.isfile(path_or_fileobj):
181
+ raise ValueError(f"Provided path: '{path_or_fileobj}' is not a file on the local file system")
182
+ elif not isinstance(self.path_or_fileobj, (io.BufferedIOBase, bytes)):
183
+ # ^^ Inspired from: https://stackoverflow.com/questions/44584829/how-to-determine-if-file-is-opened-in-binary-or-text-mode
184
+ raise ValueError(
185
+ "path_or_fileobj must be either an instance of str, bytes or"
186
+ " io.BufferedIOBase. If you passed a file-like object, make sure it is"
187
+ " in binary mode."
188
+ )
189
+ if isinstance(self.path_or_fileobj, io.BufferedIOBase):
190
+ try:
191
+ self.path_or_fileobj.tell()
192
+ self.path_or_fileobj.seek(0, os.SEEK_CUR)
193
+ except (OSError, AttributeError) as exc:
194
+ raise ValueError(
195
+ "path_or_fileobj is a file-like object but does not implement seek() and tell()"
196
+ ) from exc
197
+
198
+ # Compute "upload_info" attribute
199
+ if isinstance(self.path_or_fileobj, str):
200
+ self.upload_info = UploadInfo.from_path(self.path_or_fileobj)
201
+ elif isinstance(self.path_or_fileobj, bytes):
202
+ self.upload_info = UploadInfo.from_bytes(self.path_or_fileobj)
203
+ else:
204
+ self.upload_info = UploadInfo.from_fileobj(self.path_or_fileobj)
205
+
206
+ @contextmanager
207
+ def as_file(self, with_tqdm: bool = False) -> Iterator[BinaryIO]:
208
+ """
209
+ A context manager that yields a file-like object that allows reading the underlying
210
+ data behind `path_or_fileobj`.
211
+
212
+ Args:
213
+ with_tqdm (`bool`, *optional*, defaults to `False`):
214
+ If True, iterating over the file object will display a progress bar. Only
215
+ works if the file-like object is a path to a file. Pure bytes and buffers
216
+ are not supported.
217
+
218
+ Example:
219
+
220
+ ```python
221
+ >>> operation = CommitOperationAdd(
222
+ ... path_in_repo="remote/dir/weights.h5",
223
+ ... path_or_fileobj="./local/weights.h5",
224
+ ... )
225
+ CommitOperationAdd(path_in_repo='remote/dir/weights.h5', path_or_fileobj='./local/weights.h5')
226
+
227
+ >>> with operation.as_file() as file:
228
+ ... content = file.read()
229
+
230
+ >>> with operation.as_file(with_tqdm=True) as file:
231
+ ... while True:
232
+ ... data = file.read(1024)
233
+ ... if not data:
234
+ ... break
235
+ config.json: 100%|█████████████████████████| 8.19k/8.19k [00:02<00:00, 3.72kB/s]
236
+
237
+ >>> with operation.as_file(with_tqdm=True) as file:
238
+ ... requests.put(..., data=file)
239
+ config.json: 100%|█████████████████████████| 8.19k/8.19k [00:02<00:00, 3.72kB/s]
240
+ ```
241
+ """
242
+ if isinstance(self.path_or_fileobj, str) or isinstance(self.path_or_fileobj, Path):
243
+ if with_tqdm:
244
+ with tqdm_stream_file(self.path_or_fileobj) as file:
245
+ yield file
246
+ else:
247
+ with open(self.path_or_fileobj, "rb") as file:
248
+ yield file
249
+ elif isinstance(self.path_or_fileobj, bytes):
250
+ yield io.BytesIO(self.path_or_fileobj)
251
+ elif isinstance(self.path_or_fileobj, io.BufferedIOBase):
252
+ prev_pos = self.path_or_fileobj.tell()
253
+ yield self.path_or_fileobj
254
+ self.path_or_fileobj.seek(prev_pos, io.SEEK_SET)
255
+
256
+ def b64content(self) -> bytes:
257
+ """
258
+ The base64-encoded content of `path_or_fileobj`
259
+
260
+ Returns: `bytes`
261
+ """
262
+ with self.as_file() as file:
263
+ return base64.b64encode(file.read())
264
+
265
+ @property
266
+ def _local_oid(self) -> Optional[str]:
267
+ """Return the OID of the local file.
268
+
269
+ This OID is then compared to `self._remote_oid` to check if the file has changed compared to the remote one.
270
+ If the file did not change, we won't upload it again to prevent empty commits.
271
+
272
+ For LFS files, the OID corresponds to the SHA256 of the file content (used as LFS ref).
273
+ For regular files, the OID corresponds to the SHA1 of the file content.
274
+ Note: this is slightly different to git OID computation since the oid of an LFS file is usually the git-SHA1 of the
275
+ pointer file content (not the actual file content). However, using the SHA256 is enough to detect changes
276
+ and more convenient client-side.
277
+ """
278
+ if self._upload_mode is None:
279
+ return None
280
+ elif self._upload_mode == "lfs":
281
+ return self.upload_info.sha256.hex()
282
+ else:
283
+ # Regular file => compute sha1
284
+ # => no need to read by chunk since the file is guaranteed to be <=5MB.
285
+ with self.as_file() as file:
286
+ return sha.git_hash(file.read())
287
+
288
+
289
+ def _validate_path_in_repo(path_in_repo: str) -> str:
290
+ # Validate `path_in_repo` value to prevent a server-side issue
291
+ if path_in_repo.startswith("/"):
292
+ path_in_repo = path_in_repo[1:]
293
+ if path_in_repo == "." or path_in_repo == ".." or path_in_repo.startswith("../"):
294
+ raise ValueError(f"Invalid `path_in_repo` in CommitOperation: '{path_in_repo}'")
295
+ if path_in_repo.startswith("./"):
296
+ path_in_repo = path_in_repo[2:]
297
+ for forbidden in FORBIDDEN_FOLDERS:
298
+ if any(part == forbidden for part in path_in_repo.split("/")):
299
+ raise ValueError(
300
+ f"Invalid `path_in_repo` in CommitOperation: cannot update files under a '{forbidden}/' folder (path:"
301
+ f" '{path_in_repo}')."
302
+ )
303
+ return path_in_repo
304
+
305
+
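The normalization performed by `_validate_path_in_repo` above is easiest to see on concrete values. A small sketch, using the private module shown here purely for illustration:

```python
from huggingface_hub._commit_api import CommitOperationDelete

# Leading "/" and "./" are stripped; a trailing "/" marks the operation as a folder delete.
op = CommitOperationDelete(path_in_repo="/checkpoints/old/")
print(op.path_in_repo, op.is_folder)  # checkpoints/old/ True

# Paths escaping the repo root (".", "..", "../...") or touching a forbidden folder are rejected.
try:
    CommitOperationDelete(path_in_repo="../outside.txt")
except ValueError as err:
    print(err)
```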
306
+ CommitOperation = Union[CommitOperationAdd, CommitOperationCopy, CommitOperationDelete]
307
+
308
+
309
+ def _warn_on_overwriting_operations(operations: List[CommitOperation]) -> None:
310
+ """
311
+ Warn user when a list of operations is expected to overwrite itself in a single
312
+ commit.
313
+
314
+ Rules:
315
+ - If a filepath is updated by multiple `CommitOperationAdd` operations, a warning
316
+ message is triggered.
317
+ - If a filepath is updated at least once by a `CommitOperationAdd` and then deleted
318
+ by a `CommitOperationDelete`, a warning is triggered.
319
+ - If a `CommitOperationDelete` deletes a filepath that is then updated by a
320
+ `CommitOperationAdd`, no warning is triggered. This is usually useless (no need to
321
+ delete before upload) but can happen if a user deletes an entire folder and then
322
+ adds new files to it.
323
+ """
324
+ nb_additions_per_path: Dict[str, int] = defaultdict(int)
325
+ for operation in operations:
326
+ path_in_repo = operation.path_in_repo
327
+ if isinstance(operation, CommitOperationAdd):
328
+ if nb_additions_per_path[path_in_repo] > 0:
329
+ warnings.warn(
330
+ "About to update multiple times the same file in the same commit:"
331
+ f" '{path_in_repo}'. This can cause undesired inconsistencies in"
332
+ " your repo."
333
+ )
334
+ nb_additions_per_path[path_in_repo] += 1
335
+ for parent in PurePosixPath(path_in_repo).parents:
336
+ # Also keep track of number of updated files per folder
337
+ # => warns if deleting a folder overwrite some contained files
338
+ nb_additions_per_path[str(parent)] += 1
339
+ if isinstance(operation, CommitOperationDelete):
340
+ if nb_additions_per_path[str(PurePosixPath(path_in_repo))] > 0:
341
+ if operation.is_folder:
342
+ warnings.warn(
343
+ "About to delete a folder containing files that have just been"
344
+ f" updated within the same commit: '{path_in_repo}'. This can"
345
+ " cause undesired inconsistencies in your repo."
346
+ )
347
+ else:
348
+ warnings.warn(
349
+ "About to delete a file that has just been updated within the"
350
+ f" same commit: '{path_in_repo}'. This can cause undesired"
351
+ " inconsistencies in your repo."
352
+ )
353
+
354
+
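To make the first rule above concrete (two additions targeting the same path in a single commit), here is a short sketch that exercises the private helper directly, for illustration only:

```python
import warnings

from huggingface_hub._commit_api import CommitOperationAdd, _warn_on_overwriting_operations

ops = [
    CommitOperationAdd(path_in_repo="config.json", path_or_fileobj=b"{}"),
    CommitOperationAdd(path_in_repo="config.json", path_or_fileobj=b'{"hidden_size": 1}'),
]
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    _warn_on_overwriting_operations(ops)
print([str(w.message) for w in caught])  # one warning about updating 'config.json' twice
```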
355
+ @validate_hf_hub_args
356
+ def _upload_lfs_files(
357
+ *,
358
+ additions: List[CommitOperationAdd],
359
+ repo_type: str,
360
+ repo_id: str,
361
+ headers: Dict[str, str],
362
+ endpoint: Optional[str] = None,
363
+ num_threads: int = 5,
364
+ revision: Optional[str] = None,
365
+ ):
366
+ """
367
+ Uploads the content of `additions` to the Hub using the large file storage protocol.
368
+
369
+ Relevant external documentation:
370
+ - LFS Batch API: https://github.com/git-lfs/git-lfs/blob/main/docs/api/batch.md
371
+
372
+ Args:
373
+ additions (`List` of `CommitOperationAdd`):
374
+ The files to be uploaded
375
+ repo_type (`str`):
376
+ Type of the repo to upload to: `"model"`, `"dataset"` or `"space"`.
377
+ repo_id (`str`):
378
+ A namespace (user or an organization) and a repo name separated
379
+ by a `/`.
380
+ headers (`Dict[str, str]`):
381
+ Headers to use for the request, including authorization headers and user agent.
382
+ num_threads (`int`, *optional*):
383
+ The number of concurrent threads to use when uploading. Defaults to 5.
384
+ revision (`str`, *optional*):
385
+ The git revision to upload to.
386
+
387
+ Raises:
388
+ [`EnvironmentError`](https://docs.python.org/3/library/exceptions.html#EnvironmentError)
389
+ If an upload failed for any reason
390
+ [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
391
+ If the server returns malformed responses
392
+ [`HTTPError`](https://requests.readthedocs.io/en/latest/api/#requests.HTTPError)
393
+ If the LFS batch endpoint returned an HTTP error.
394
+ """
395
+ # Step 1: retrieve upload instructions from the LFS batch endpoint.
396
+ # Upload instructions are retrieved by chunk of 256 files to avoid reaching
397
+ # the payload limit.
398
+ batch_actions: List[Dict] = []
399
+ for chunk in chunk_iterable(additions, chunk_size=UPLOAD_BATCH_MAX_NUM_FILES):
400
+ batch_actions_chunk, batch_errors_chunk = post_lfs_batch_info(
401
+ upload_infos=[op.upload_info for op in chunk],
402
+ repo_id=repo_id,
403
+ repo_type=repo_type,
404
+ revision=revision,
405
+ endpoint=endpoint,
406
+ headers=headers,
407
+ token=None, # already passed in 'headers'
408
+ )
409
+
410
+ # If at least 1 error, we do not retrieve information for other chunks
411
+ if batch_errors_chunk:
412
+ message = "\n".join(
413
+ [
414
+ f"Encountered error for file with OID {err.get('oid')}: `{err.get('error', {}).get('message')}`"
415
+ for err in batch_errors_chunk
416
+ ]
417
+ )
418
+ raise ValueError(f"LFS batch endpoint returned errors:\n{message}")
419
+
420
+ batch_actions += batch_actions_chunk
421
+ oid2addop = {add_op.upload_info.sha256.hex(): add_op for add_op in additions}
422
+
423
+ # Step 2: ignore files that have already been uploaded
424
+ filtered_actions = []
425
+ for action in batch_actions:
426
+ if action.get("actions") is None:
427
+ logger.debug(
428
+ f"Content of file {oid2addop[action['oid']].path_in_repo} is already"
429
+ " present upstream - skipping upload."
430
+ )
431
+ else:
432
+ filtered_actions.append(action)
433
+
434
+ if len(filtered_actions) == 0:
435
+ logger.debug("No LFS files to upload.")
436
+ return
437
+
438
+ # Step 3: upload files concurrently according to these instructions
439
+ def _wrapped_lfs_upload(batch_action) -> None:
440
+ try:
441
+ operation = oid2addop[batch_action["oid"]]
442
+ lfs_upload(operation=operation, lfs_batch_action=batch_action, headers=headers, endpoint=endpoint)
443
+ except Exception as exc:
444
+ raise RuntimeError(f"Error while uploading '{operation.path_in_repo}' to the Hub.") from exc
445
+
446
+ if constants.HF_HUB_ENABLE_HF_TRANSFER:
447
+ logger.debug(f"Uploading {len(filtered_actions)} LFS files to the Hub using `hf_transfer`.")
448
+ for action in hf_tqdm(filtered_actions, name="huggingface_hub.lfs_upload"):
449
+ _wrapped_lfs_upload(action)
450
+ elif len(filtered_actions) == 1:
451
+ logger.debug("Uploading 1 LFS file to the Hub")
452
+ _wrapped_lfs_upload(filtered_actions[0])
453
+ else:
454
+ logger.debug(
455
+ f"Uploading {len(filtered_actions)} LFS files to the Hub using up to {num_threads} threads concurrently"
456
+ )
457
+ thread_map(
458
+ _wrapped_lfs_upload,
459
+ filtered_actions,
460
+ desc=f"Upload {len(filtered_actions)} LFS files",
461
+ max_workers=num_threads,
462
+ tqdm_class=hf_tqdm,
463
+ )
464
+
465
+
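The code above chooses one of three upload strategies: with `HF_HUB_ENABLE_HF_TRANSFER` set, actions are processed sequentially and `hf_transfer` parallelizes internally; a single file is uploaded directly; otherwise a thread pool of up to `num_threads` workers (default 5) is used. A minimal way to opt into the first strategy, assuming the optional `hf_transfer` package is installed:

```python
import os

# Must be set before huggingface_hub (and its constants module) is first imported.
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"

from huggingface_hub import HfApi

api = HfApi()
# upload_file() -> create_commit() -> _upload_lfs_files() now routes each LFS
# action through hf_transfer instead of the default thread pool, e.g.:
# api.upload_file(path_or_fileobj="weights.bin", path_in_repo="weights.bin", repo_id="user/repo")
```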
466
+ @validate_hf_hub_args
467
+ def _upload_xet_files(
468
+ *,
469
+ additions: List[CommitOperationAdd],
470
+ repo_type: str,
471
+ repo_id: str,
472
+ headers: Dict[str, str],
473
+ endpoint: Optional[str] = None,
474
+ revision: Optional[str] = None,
475
+ create_pr: Optional[bool] = None,
476
+ ):
477
+ """
478
+ Uploads the content of `additions` to the Hub using the xet storage protocol.
479
+ This chunks the files and deduplicates the chunks before uploading them to xetcas storage.
480
+
481
+ Args:
482
+ additions (`List` of `CommitOperationAdd`):
483
+ The files to be uploaded.
484
+ repo_type (`str`):
485
+ Type of the repo to upload to: `"model"`, `"dataset"` or `"space"`.
486
+ repo_id (`str`):
487
+ A namespace (user or an organization) and a repo name separated
488
+ by a `/`.
489
+ headers (`Dict[str, str]`):
490
+ Headers to use for the request, including authorization headers and user agent.
491
+ endpoint (`str`, *optional*):
492
+ The endpoint to use for the xetcas service. Defaults to `constants.ENDPOINT`.
493
+ revision (`str`, *optional*):
494
+ The git revision to upload to.
495
+ create_pr (`bool`, *optional*):
496
+ Whether or not to create a Pull Request with that commit.
497
+
498
+ Raises:
499
+ [`EnvironmentError`](https://docs.python.org/3/library/exceptions.html#EnvironmentError)
500
+ If an upload failed for any reason.
501
+ [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
502
+ If the server returns malformed responses or if the user is unauthorized to upload to xet storage.
503
+ [`HTTPError`](https://requests.readthedocs.io/en/latest/api/#requests.HTTPError)
504
+ If the LFS batch endpoint returned an HTTP error.
505
+
506
+ **How it works:**
507
+ The file upload system uses Xet storage, which is a content-addressable storage system that breaks files into chunks
508
+ for efficient storage and transfer.
509
+
510
+ `hf_xet.upload_files` manages uploading files by:
511
+ - Taking a list of file paths to upload
512
+ - Breaking files into smaller chunks for efficient storage
513
+ - Avoiding duplicate storage by recognizing identical chunks across files
514
+ - Connecting to a storage server (CAS server) that manages these chunks
515
+
516
+ The upload process works like this:
517
+ 1. Create a local folder at ~/.cache/huggingface/xet/chunk-cache to store file chunks for reuse.
518
+ 2. Process files in parallel (up to 8 files at once):
519
+ 2.1. Read the file content.
520
+ 2.2. Split the file content into smaller chunks based on content patterns: each chunk gets a unique ID based on what's in it.
521
+ 2.3. For each chunk:
522
+ - Check if it already exists in storage.
523
+ - Skip uploading chunks that already exist.
524
+ 2.4. Group chunks into larger blocks for efficient transfer.
525
+ 2.5. Upload these blocks to the storage server.
526
+ 2.6. Create and upload information about how the file is structured.
527
+ 3. Return reference files that contain information about the uploaded files, which can be used later to download them.
528
+ """
529
+ if len(additions) == 0:
530
+ return
531
+
532
+ # at this point, we know that hf_xet is installed
533
+ from hf_xet import upload_bytes, upload_files
534
+
535
+ from .utils._xet_progress_reporting import XetProgressReporter
536
+
537
+ try:
538
+ xet_connection_info = fetch_xet_connection_info_from_repo_info(
539
+ token_type=XetTokenType.WRITE,
540
+ repo_id=repo_id,
541
+ repo_type=repo_type,
542
+ revision=revision,
543
+ headers=headers,
544
+ endpoint=endpoint,
545
+ params={"create_pr": "1"} if create_pr else None,
546
+ )
547
+ except HfHubHTTPError as e:
548
+ if e.response.status_code == 401:
549
+ raise XetAuthorizationError(
550
+ f"You are unauthorized to upload to xet storage for {repo_type}/{repo_id}. "
551
+ f"Please check that you have configured your access token with write access to the repo."
552
+ ) from e
553
+ raise
554
+
555
+ xet_endpoint = xet_connection_info.endpoint
556
+ access_token_info = (xet_connection_info.access_token, xet_connection_info.expiration_unix_epoch)
557
+
558
+ def token_refresher() -> Tuple[str, int]:
559
+ new_xet_connection = fetch_xet_connection_info_from_repo_info(
560
+ token_type=XetTokenType.WRITE,
561
+ repo_id=repo_id,
562
+ repo_type=repo_type,
563
+ revision=revision,
564
+ headers=headers,
565
+ endpoint=endpoint,
566
+ params={"create_pr": "1"} if create_pr else None,
567
+ )
568
+ if new_xet_connection is None:
569
+ raise XetRefreshTokenError("Failed to refresh xet token")
570
+ return new_xet_connection.access_token, new_xet_connection.expiration_unix_epoch
571
+
572
+ if not are_progress_bars_disabled():
573
+ progress = XetProgressReporter()
574
+ progress_callback = progress.update_progress
575
+ else:
576
+ progress, progress_callback = None, None
577
+
578
+ try:
579
+ for i, chunk in enumerate(chunk_iterable(additions, chunk_size=UPLOAD_BATCH_MAX_NUM_FILES)):
580
+ _chunk = [op for op in chunk]
581
+
582
+ bytes_ops = [op for op in _chunk if isinstance(op.path_or_fileobj, bytes)]
583
+ paths_ops = [op for op in _chunk if isinstance(op.path_or_fileobj, (str, Path))]
584
+
585
+ if len(paths_ops) > 0:
586
+ upload_files(
587
+ [str(op.path_or_fileobj) for op in paths_ops],
588
+ xet_endpoint,
589
+ access_token_info,
590
+ token_refresher,
591
+ progress_callback,
592
+ repo_type,
593
+ )
594
+ if len(bytes_ops) > 0:
595
+ upload_bytes(
596
+ [op.path_or_fileobj for op in bytes_ops],
597
+ xet_endpoint,
598
+ access_token_info,
599
+ token_refresher,
600
+ progress_callback,
601
+ repo_type,
602
+ )
603
+
604
+ finally:
605
+ if progress is not None:
606
+ progress.close(False)
607
+
608
+ return
609
+
610
+
611
+ def _validate_preupload_info(preupload_info: dict):
612
+ files = preupload_info.get("files")
613
+ if not isinstance(files, list):
614
+ raise ValueError("preupload_info is improperly formatted")
615
+ for file_info in files:
616
+ if not (
617
+ isinstance(file_info, dict)
618
+ and isinstance(file_info.get("path"), str)
619
+ and isinstance(file_info.get("uploadMode"), str)
620
+ and (file_info["uploadMode"] in ("lfs", "regular"))
621
+ ):
622
+ raise ValueError("preupload_info is improperly formatted:")
623
+ return preupload_info
624
+
625
+
626
+ @validate_hf_hub_args
627
+ def _fetch_upload_modes(
628
+ additions: Iterable[CommitOperationAdd],
629
+ repo_type: str,
630
+ repo_id: str,
631
+ headers: Dict[str, str],
632
+ revision: str,
633
+ endpoint: Optional[str] = None,
634
+ create_pr: bool = False,
635
+ gitignore_content: Optional[str] = None,
636
+ ) -> None:
637
+ """
638
+ Requests the Hub "preupload" endpoint to determine whether each input file should be uploaded as a regular git blob,
639
+ as a git LFS blob, or as a XET file. Input `additions` are mutated in-place with the upload mode.
640
+
641
+ Args:
642
+ additions (`Iterable` of :class:`CommitOperationAdd`):
643
+ Iterable of :class:`CommitOperationAdd` describing the files to
644
+ upload to the Hub.
645
+ repo_type (`str`):
646
+ Type of the repo to upload to: `"model"`, `"dataset"` or `"space"`.
647
+ repo_id (`str`):
648
+ A namespace (user or an organization) and a repo name separated
649
+ by a `/`.
650
+ headers (`Dict[str, str]`):
651
+ Headers to use for the request, including authorization headers and user agent.
652
+ revision (`str`):
653
+ The git revision to upload the files to. Can be any valid git revision.
654
+ gitignore_content (`str`, *optional*):
655
+ The content of the `.gitignore` file to know which files should be ignored. The order of priority
656
+ is to first check if `gitignore_content` is passed, then check if the `.gitignore` file is present
657
+ in the list of files to commit and finally default to the `.gitignore` file already hosted on the Hub
658
+ (if any).
659
+ Raises:
660
+ [`~utils.HfHubHTTPError`]
661
+ If the Hub API returned an error.
662
+ [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
663
+ If the Hub API response is improperly formatted.
664
+ """
665
+ endpoint = endpoint if endpoint is not None else constants.ENDPOINT
666
+
667
+ # Fetch upload mode (LFS or regular) chunk by chunk.
668
+ upload_modes: Dict[str, UploadMode] = {}
669
+ should_ignore_info: Dict[str, bool] = {}
670
+ oid_info: Dict[str, Optional[str]] = {}
671
+
672
+ for chunk in chunk_iterable(additions, 256):
673
+ payload: Dict = {
674
+ "files": [
675
+ {
676
+ "path": op.path_in_repo,
677
+ "sample": base64.b64encode(op.upload_info.sample).decode("ascii"),
678
+ "size": op.upload_info.size,
679
+ }
680
+ for op in chunk
681
+ ]
682
+ }
683
+ if gitignore_content is not None:
684
+ payload["gitIgnore"] = gitignore_content
685
+
686
+ resp = get_session().post(
687
+ f"{endpoint}/api/{repo_type}s/{repo_id}/preupload/{revision}",
688
+ json=payload,
689
+ headers=headers,
690
+ params={"create_pr": "1"} if create_pr else None,
691
+ )
692
+ hf_raise_for_status(resp)
693
+ preupload_info = _validate_preupload_info(resp.json())
694
+ upload_modes.update(**{file["path"]: file["uploadMode"] for file in preupload_info["files"]})
695
+ should_ignore_info.update(**{file["path"]: file["shouldIgnore"] for file in preupload_info["files"]})
696
+ oid_info.update(**{file["path"]: file.get("oid") for file in preupload_info["files"]})
697
+
698
+ # Set upload mode for each addition operation
699
+ for addition in additions:
700
+ addition._upload_mode = upload_modes[addition.path_in_repo]
701
+ addition._should_ignore = should_ignore_info[addition.path_in_repo]
702
+ addition._remote_oid = oid_info[addition.path_in_repo]
703
+
704
+ # Empty files cannot be uploaded as LFS (S3 would fail with a 501 Not Implemented)
705
+ # => empty files are uploaded as "regular" to still allow users to commit them.
706
+ for addition in additions:
707
+ if addition.upload_info.size == 0:
708
+ addition._upload_mode = "regular"
709
+
710
+
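For reference, the preupload exchange this function relies on looks roughly like the following; the field names come from the code above, while the paths and values are invented:

```python
# POST {endpoint}/api/{repo_type}s/{repo_id}/preupload/{revision}
payload = {
    "files": [
        {"path": "weights.bin", "sample": "<base64 of the first bytes>", "size": 734003200},
        {"path": "README.md", "sample": "<base64 of the first bytes>", "size": 1204},
    ],
    # "gitIgnore": "*.log\n",  # optional override, see `gitignore_content`
}

# Expected response shape, consumed field by field above:
response = {
    "files": [
        {"path": "weights.bin", "uploadMode": "lfs", "shouldIgnore": False, "oid": None},
        {"path": "README.md", "uploadMode": "regular", "shouldIgnore": False, "oid": "8ab3f9..."},
    ]
}
```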
711
+ @validate_hf_hub_args
712
+ def _fetch_files_to_copy(
713
+ copies: Iterable[CommitOperationCopy],
714
+ repo_type: str,
715
+ repo_id: str,
716
+ headers: Dict[str, str],
717
+ revision: str,
718
+ endpoint: Optional[str] = None,
719
+ ) -> Dict[Tuple[str, Optional[str]], Union["RepoFile", bytes]]:
720
+ """
721
+ Fetch information about the files to copy.
722
+
723
+ For LFS files, we only need their metadata (file size and sha256) while for regular files
724
+ we need to download the raw content from the Hub.
725
+
726
+ Args:
727
+ copies (`Iterable` of :class:`CommitOperationCopy`):
728
+ Iterable of :class:`CommitOperationCopy` describing the files to
729
+ copy on the Hub.
730
+ repo_type (`str`):
731
+ Type of the repo to upload to: `"model"`, `"dataset"` or `"space"`.
732
+ repo_id (`str`):
733
+ A namespace (user or an organization) and a repo name separated
734
+ by a `/`.
735
+ headers (`Dict[str, str]`):
736
+ Headers to use for the request, including authorization headers and user agent.
737
+ revision (`str`):
738
+ The git revision to upload the files to. Can be any valid git revision.
739
+
740
+ Returns: `Dict[Tuple[str, Optional[str]], Union[RepoFile, bytes]]]`
741
+ Key is the file path and revision of the file to copy.
742
+ Value is the raw content as bytes (for regular files) or the file information as a RepoFile (for LFS files).
743
+
744
+ Raises:
745
+ [`~utils.HfHubHTTPError`]
746
+ If the Hub API returned an error.
747
+ [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
748
+ If the Hub API response is improperly formatted.
749
+ """
750
+ from .hf_api import HfApi, RepoFolder
751
+
752
+ hf_api = HfApi(endpoint=endpoint, headers=headers)
753
+ files_to_copy: Dict[Tuple[str, Optional[str]], Union["RepoFile", bytes]] = {}
754
+ # Store (path, revision) -> oid mapping
755
+ oid_info: Dict[Tuple[str, Optional[str]], Optional[str]] = {}
756
+ # 1. Fetch OIDs for destination paths in batches.
757
+ dest_paths = [op.path_in_repo for op in copies]
758
+ for offset in range(0, len(dest_paths), FETCH_LFS_BATCH_SIZE):
759
+ dest_repo_files = hf_api.get_paths_info(
760
+ repo_id=repo_id,
761
+ paths=dest_paths[offset : offset + FETCH_LFS_BATCH_SIZE],
762
+ revision=revision,
763
+ repo_type=repo_type,
764
+ )
765
+ for file in dest_repo_files:
766
+ if not isinstance(file, RepoFolder):
767
+ oid_info[(file.path, revision)] = file.blob_id
768
+
769
+ # 2. Group by source revision and fetch source file info in batches.
770
+ for src_revision, operations in groupby(copies, key=lambda op: op.src_revision):
771
+ operations = list(operations) # type: ignore
772
+ src_paths = [op.src_path_in_repo for op in operations]
773
+ for offset in range(0, len(src_paths), FETCH_LFS_BATCH_SIZE):
774
+ src_repo_files = hf_api.get_paths_info(
775
+ repo_id=repo_id,
776
+ paths=src_paths[offset : offset + FETCH_LFS_BATCH_SIZE],
777
+ revision=src_revision or revision,
778
+ repo_type=repo_type,
779
+ )
780
+
781
+ for src_repo_file in src_repo_files:
782
+ if isinstance(src_repo_file, RepoFolder):
783
+ raise NotImplementedError("Copying a folder is not implemented.")
784
+ oid_info[(src_repo_file.path, src_revision)] = src_repo_file.blob_id
785
+ # If it's an LFS file, store the RepoFile object. Otherwise, download raw bytes.
786
+ if src_repo_file.lfs:
787
+ files_to_copy[(src_repo_file.path, src_revision)] = src_repo_file
788
+ else:
789
+ # TODO: (optimization) download regular files to copy concurrently
790
+ url = hf_hub_url(
791
+ endpoint=endpoint,
792
+ repo_type=repo_type,
793
+ repo_id=repo_id,
794
+ revision=src_revision or revision,
795
+ filename=src_repo_file.path,
796
+ )
797
+ response = get_session().get(url, headers=headers)
798
+ hf_raise_for_status(response)
799
+ files_to_copy[(src_repo_file.path, src_revision)] = response.content
800
+ # 3. Ensure all operations found a corresponding file in the Hub
801
+ # and track src/dest OIDs for each operation.
802
+ for operation in operations:
803
+ if (operation.src_path_in_repo, src_revision) not in files_to_copy:
804
+ raise EntryNotFoundError(
805
+ f"Cannot copy {operation.src_path_in_repo} at revision "
806
+ f"{src_revision or revision}: file is missing on repo."
807
+ )
808
+ operation._src_oid = oid_info.get((operation.src_path_in_repo, operation.src_revision))
809
+ operation._dest_oid = oid_info.get((operation.path_in_repo, revision))
810
+ return files_to_copy
811
+
812
+
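The mapping returned above is keyed by `(path, src_revision)` tuples. A minimal sketch of how it can be inspected, assuming a placeholder repo id and file names (a real call needs network access and a valid token):

```python
# Hypothetical sketch: inspect the mapping returned by _fetch_files_to_copy.
# Repo id and file names below are placeholders.
from huggingface_hub import CommitOperationCopy
from huggingface_hub._commit_api import _fetch_files_to_copy
from huggingface_hub.utils import build_hf_headers

copies = [CommitOperationCopy(src_path_in_repo="model.safetensors", path_in_repo="backup/model.safetensors")]
files_to_copy = _fetch_files_to_copy(
    copies=copies,
    repo_type="model",
    repo_id="username/my-model",
    headers=build_hf_headers(),  # resolves the locally saved token
    revision="main",
)
for (path, src_revision), value in files_to_copy.items():
    # bytes => raw content of a regular file; otherwise a RepoFile with LFS metadata (size + sha256)
    kind = "regular (raw bytes)" if isinstance(value, bytes) else "LFS (RepoFile)"
    print(path, src_revision, kind)
```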
813
+ def _prepare_commit_payload(
814
+ operations: Iterable[CommitOperation],
815
+ files_to_copy: Dict[Tuple[str, Optional[str]], Union["RepoFile", bytes]],
816
+ commit_message: str,
817
+ commit_description: Optional[str] = None,
818
+ parent_commit: Optional[str] = None,
819
+ ) -> Iterable[Dict[str, Any]]:
820
+ """
821
+ Builds the payload to POST to the `/commit` API of the Hub.
822
+
823
+ Payload is returned as an iterator so that it can be streamed as a ndjson in the
824
+ POST request.
825
+
826
+ For more information, see:
827
+ - https://github.com/huggingface/huggingface_hub/issues/1085#issuecomment-1265208073
828
+ - http://ndjson.org/
829
+ """
830
+ commit_description = commit_description if commit_description is not None else ""
831
+
832
+ # 1. Send a header item with the commit metadata
833
+ header_value = {"summary": commit_message, "description": commit_description}
834
+ if parent_commit is not None:
835
+ header_value["parentCommit"] = parent_commit
836
+ yield {"key": "header", "value": header_value}
837
+
838
+ nb_ignored_files = 0
839
+
840
+ # 2. Send operations, one per line
841
+ for operation in operations:
842
+ # Skip ignored files
843
+ if isinstance(operation, CommitOperationAdd) and operation._should_ignore:
844
+ logger.debug(f"Skipping file '{operation.path_in_repo}' in commit (ignored by gitignore file).")
845
+ nb_ignored_files += 1
846
+ continue
847
+
848
+ # 2.a. Case adding a regular file
849
+ if isinstance(operation, CommitOperationAdd) and operation._upload_mode == "regular":
850
+ yield {
851
+ "key": "file",
852
+ "value": {
853
+ "content": operation.b64content().decode(),
854
+ "path": operation.path_in_repo,
855
+ "encoding": "base64",
856
+ },
857
+ }
858
+ # 2.b. Case adding an LFS file
859
+ elif isinstance(operation, CommitOperationAdd) and operation._upload_mode == "lfs":
860
+ yield {
861
+ "key": "lfsFile",
862
+ "value": {
863
+ "path": operation.path_in_repo,
864
+ "algo": "sha256",
865
+ "oid": operation.upload_info.sha256.hex(),
866
+ "size": operation.upload_info.size,
867
+ },
868
+ }
869
+ # 2.c. Case deleting a file or folder
870
+ elif isinstance(operation, CommitOperationDelete):
871
+ yield {
872
+ "key": "deletedFolder" if operation.is_folder else "deletedFile",
873
+ "value": {"path": operation.path_in_repo},
874
+ }
875
+ # 2.d. Case copying a file or folder
876
+ elif isinstance(operation, CommitOperationCopy):
877
+ file_to_copy = files_to_copy[(operation.src_path_in_repo, operation.src_revision)]
878
+ if isinstance(file_to_copy, bytes):
879
+ yield {
880
+ "key": "file",
881
+ "value": {
882
+ "content": base64.b64encode(file_to_copy).decode(),
883
+ "path": operation.path_in_repo,
884
+ "encoding": "base64",
885
+ },
886
+ }
887
+ elif file_to_copy.lfs:
888
+ yield {
889
+ "key": "lfsFile",
890
+ "value": {
891
+ "path": operation.path_in_repo,
892
+ "algo": "sha256",
893
+ "oid": file_to_copy.lfs.sha256,
894
+ },
895
+ }
896
+ else:
897
+ raise ValueError(
898
+ "Malformed files_to_copy (should be raw file content as bytes or RepoFile objects with LFS info."
899
+ )
900
+ # 2.e. Never expected to happen
901
+ else:
902
+ raise ValueError(
903
+ f"Unknown operation to commit. Operation: {operation}. Upload mode:"
904
+ f" {getattr(operation, '_upload_mode', None)}"
905
+ )
906
+
907
+ if nb_ignored_files > 0:
908
+ logger.info(f"Skipped {nb_ignored_files} file(s) in commit (ignored by gitignore file).")
venv/lib/python3.13/site-packages/huggingface_hub/_commit_scheduler.py ADDED
@@ -0,0 +1,353 @@
1
+ import atexit
2
+ import logging
3
+ import os
4
+ import time
5
+ from concurrent.futures import Future
6
+ from dataclasses import dataclass
7
+ from io import SEEK_END, SEEK_SET, BytesIO
8
+ from pathlib import Path
9
+ from threading import Lock, Thread
10
+ from typing import Dict, List, Optional, Union
11
+
12
+ from .hf_api import DEFAULT_IGNORE_PATTERNS, CommitInfo, CommitOperationAdd, HfApi
13
+ from .utils import filter_repo_objects
14
+
15
+
16
+ logger = logging.getLogger(__name__)
17
+
18
+
19
+ @dataclass(frozen=True)
20
+ class _FileToUpload:
21
+ """Temporary dataclass to store info about files to upload. Not meant to be used directly."""
22
+
23
+ local_path: Path
24
+ path_in_repo: str
25
+ size_limit: int
26
+ last_modified: float
27
+
28
+
29
+ class CommitScheduler:
30
+ """
31
+ Scheduler to upload a local folder to the Hub at regular intervals (e.g. push to hub every 5 minutes).
32
+
33
+ The recommended way to use the scheduler is to use it as a context manager. This ensures that the scheduler is
34
+ properly stopped and the last commit is triggered when the script ends. The scheduler can also be stopped manually
35
+ with the `stop` method. Checkout the [upload guide](https://huggingface.co/docs/huggingface_hub/guides/upload#scheduled-uploads)
36
+ to learn more about how to use it.
37
+
38
+ Args:
39
+ repo_id (`str`):
40
+ The id of the repo to commit to.
41
+ folder_path (`str` or `Path`):
42
+ Path to the local folder to upload regularly.
43
+ every (`int` or `float`, *optional*):
44
+ The number of minutes between each commit. Defaults to 5 minutes.
45
+ path_in_repo (`str`, *optional*):
46
+ Relative path of the directory in the repo, for example: `"checkpoints/"`. Defaults to the root folder
47
+ of the repository.
48
+ repo_type (`str`, *optional*):
49
+ The type of the repo to commit to. Defaults to `model`.
50
+ revision (`str`, *optional*):
51
+ The revision of the repo to commit to. Defaults to `main`.
52
+ private (`bool`, *optional*):
53
+ Whether to make the repo private. If `None` (default), the repo will be public unless the organization's default is private. This value is ignored if the repo already exists.
54
+ token (`str`, *optional*):
55
+ The token to use to commit to the repo. Defaults to the token saved on the machine.
56
+ allow_patterns (`List[str]` or `str`, *optional*):
57
+ If provided, only files matching at least one pattern are uploaded.
58
+ ignore_patterns (`List[str]` or `str`, *optional*):
59
+ If provided, files matching any of the patterns are not uploaded.
60
+ squash_history (`bool`, *optional*):
61
+ Whether to squash the history of the repo after each commit. Defaults to `False`. Squashing commits is
62
+ useful to avoid degraded performances on the repo when it grows too large.
63
+ hf_api (`HfApi`, *optional*):
64
+ The [`HfApi`] client to use to commit to the Hub. Can be set with custom settings (user agent, token,...).
65
+
66
+ Example:
67
+ ```py
68
+ >>> from pathlib import Path
69
+ >>> from huggingface_hub import CommitScheduler
70
+
71
+ # Scheduler uploads every 10 minutes
72
+ >>> csv_path = Path("watched_folder/data.csv")
73
+ >>> CommitScheduler(repo_id="test_scheduler", repo_type="dataset", folder_path=csv_path.parent, every=10)
74
+
75
+ >>> with csv_path.open("a") as f:
76
+ ... f.write("first line")
77
+
78
+ # Some time later (...)
79
+ >>> with csv_path.open("a") as f:
80
+ ... f.write("second line")
81
+ ```
82
+
83
+ Example using a context manager:
84
+ ```py
85
+ >>> from pathlib import Path
86
+ >>> from huggingface_hub import CommitScheduler
87
+
88
+ >>> with CommitScheduler(repo_id="test_scheduler", repo_type="dataset", folder_path="watched_folder", every=10) as scheduler:
89
+ ... csv_path = Path("watched_folder/data.csv")
90
+ ... with csv_path.open("a") as f:
91
+ ... f.write("first line")
92
+ ... (...)
93
+ ... with csv_path.open("a") as f:
94
+ ... f.write("second line")
95
+
96
+ # Scheduler is now stopped and the last commit has been triggered
97
+ ```
98
+ """
99
+
100
+ def __init__(
101
+ self,
102
+ *,
103
+ repo_id: str,
104
+ folder_path: Union[str, Path],
105
+ every: Union[int, float] = 5,
106
+ path_in_repo: Optional[str] = None,
107
+ repo_type: Optional[str] = None,
108
+ revision: Optional[str] = None,
109
+ private: Optional[bool] = None,
110
+ token: Optional[str] = None,
111
+ allow_patterns: Optional[Union[List[str], str]] = None,
112
+ ignore_patterns: Optional[Union[List[str], str]] = None,
113
+ squash_history: bool = False,
114
+ hf_api: Optional["HfApi"] = None,
115
+ ) -> None:
116
+ self.api = hf_api or HfApi(token=token)
117
+
118
+ # Folder
119
+ self.folder_path = Path(folder_path).expanduser().resolve()
120
+ self.path_in_repo = path_in_repo or ""
121
+ self.allow_patterns = allow_patterns
122
+
123
+ if ignore_patterns is None:
124
+ ignore_patterns = []
125
+ elif isinstance(ignore_patterns, str):
126
+ ignore_patterns = [ignore_patterns]
127
+ self.ignore_patterns = ignore_patterns + DEFAULT_IGNORE_PATTERNS
128
+
129
+ if self.folder_path.is_file():
130
+ raise ValueError(f"'folder_path' must be a directory, not a file: '{self.folder_path}'.")
131
+ self.folder_path.mkdir(parents=True, exist_ok=True)
132
+
133
+ # Repository
134
+ repo_url = self.api.create_repo(repo_id=repo_id, private=private, repo_type=repo_type, exist_ok=True)
135
+ self.repo_id = repo_url.repo_id
136
+ self.repo_type = repo_type
137
+ self.revision = revision
138
+ self.token = token
139
+
140
+ # Keep track of already uploaded files
141
+ self.last_uploaded: Dict[Path, float] = {} # key is local path, value is timestamp
142
+
143
+ # Scheduler
144
+ if not every > 0:
145
+ raise ValueError(f"'every' must be a positive integer, not '{every}'.")
146
+ self.lock = Lock()
147
+ self.every = every
148
+ self.squash_history = squash_history
149
+
150
+ logger.info(f"Scheduled job to push '{self.folder_path}' to '{self.repo_id}' every {self.every} minutes.")
151
+ self._scheduler_thread = Thread(target=self._run_scheduler, daemon=True)
152
+ self._scheduler_thread.start()
153
+ atexit.register(self._push_to_hub)
154
+
155
+ self.__stopped = False
156
+
157
+ def stop(self) -> None:
158
+ """Stop the scheduler.
159
+
160
+ A stopped scheduler cannot be restarted. Mostly intended for testing purposes.
161
+ """
162
+ self.__stopped = True
163
+
164
+ def __enter__(self) -> "CommitScheduler":
165
+ return self
166
+
167
+ def __exit__(self, exc_type, exc_value, traceback) -> None:
168
+ # Upload last changes before exiting
169
+ self.trigger().result()
170
+ self.stop()
171
+ return
172
+
173
+ def _run_scheduler(self) -> None:
174
+ """Dumb thread waiting between each scheduled push to Hub."""
175
+ while True:
176
+ self.last_future = self.trigger()
177
+ time.sleep(self.every * 60)
178
+ if self.__stopped:
179
+ break
180
+
181
+ def trigger(self) -> Future:
182
+ """Trigger a `push_to_hub` and return a future.
183
+
184
+ This method is automatically called every `every` minutes. You can also call it manually to trigger a commit
185
+ immediately, without waiting for the next scheduled commit.
186
+ """
187
+ return self.api.run_as_future(self._push_to_hub)
188
+
189
+ def _push_to_hub(self) -> Optional[CommitInfo]:
190
+ if self.__stopped: # If stopped, already scheduled commits are ignored
191
+ return None
192
+
193
+ logger.info("(Background) scheduled commit triggered.")
194
+ try:
195
+ value = self.push_to_hub()
196
+ if self.squash_history:
197
+ logger.info("(Background) squashing repo history.")
198
+ self.api.super_squash_history(repo_id=self.repo_id, repo_type=self.repo_type, branch=self.revision)
199
+ return value
200
+ except Exception as e:
201
+ logger.error(f"Error while pushing to Hub: {e}") # Depending on the setup, error might be silenced
202
+ raise
203
+
204
+ def push_to_hub(self) -> Optional[CommitInfo]:
205
+ """
206
+ Push folder to the Hub and return the commit info.
207
+
208
+ <Tip warning={true}>
209
+
210
+ This method is not meant to be called directly. It is run in the background by the scheduler, respecting a
211
+ queue mechanism to avoid concurrent commits. Making a direct call to the method might lead to concurrency
212
+ issues.
213
+
214
+ </Tip>
215
+
216
+ The default behavior of `push_to_hub` is to assume an append-only folder. It lists all files in the folder and
217
+ uploads only changed files. If no changes are found, the method returns without committing anything. If you want
218
+ to change this behavior, you can inherit from [`CommitScheduler`] and override this method. This can be useful
219
+ for example to compress data together in a single file before committing. For more details and examples, check
220
+ out our [integration guide](https://huggingface.co/docs/huggingface_hub/main/en/guides/upload#scheduled-uploads).
221
+ """
222
+ # Check files to upload (with lock)
223
+ with self.lock:
224
+ logger.debug("Listing files to upload for scheduled commit.")
225
+
226
+ # List files from folder (taken from `_prepare_upload_folder_additions`)
227
+ relpath_to_abspath = {
228
+ path.relative_to(self.folder_path).as_posix(): path
229
+ for path in sorted(self.folder_path.glob("**/*")) # sorted to be deterministic
230
+ if path.is_file()
231
+ }
232
+ prefix = f"{self.path_in_repo.strip('/')}/" if self.path_in_repo else ""
233
+
234
+ # Filter with pattern + filter out unchanged files + retrieve current file size
235
+ files_to_upload: List[_FileToUpload] = []
236
+ for relpath in filter_repo_objects(
237
+ relpath_to_abspath.keys(), allow_patterns=self.allow_patterns, ignore_patterns=self.ignore_patterns
238
+ ):
239
+ local_path = relpath_to_abspath[relpath]
240
+ stat = local_path.stat()
241
+ if self.last_uploaded.get(local_path) is None or self.last_uploaded[local_path] != stat.st_mtime:
242
+ files_to_upload.append(
243
+ _FileToUpload(
244
+ local_path=local_path,
245
+ path_in_repo=prefix + relpath,
246
+ size_limit=stat.st_size,
247
+ last_modified=stat.st_mtime,
248
+ )
249
+ )
250
+
251
+ # Return if nothing to upload
252
+ if len(files_to_upload) == 0:
253
+ logger.debug("Dropping schedule commit: no changed file to upload.")
254
+ return None
255
+
256
+ # Convert `_FileToUpload` to `CommitOperationAdd` (=> compute file shas + limit to file size)
257
+ logger.debug("Removing unchanged files since previous scheduled commit.")
258
+ add_operations = [
259
+ CommitOperationAdd(
260
+ # Cap the file to its current size, even if the user appends data to it while a scheduled commit is happening
261
+ path_or_fileobj=PartialFileIO(file_to_upload.local_path, size_limit=file_to_upload.size_limit),
262
+ path_in_repo=file_to_upload.path_in_repo,
263
+ )
264
+ for file_to_upload in files_to_upload
265
+ ]
266
+
267
+ # Upload files (append mode expected - no need for lock)
268
+ logger.debug("Uploading files for scheduled commit.")
269
+ commit_info = self.api.create_commit(
270
+ repo_id=self.repo_id,
271
+ repo_type=self.repo_type,
272
+ operations=add_operations,
273
+ commit_message="Scheduled Commit",
274
+ revision=self.revision,
275
+ )
276
+
277
+ # Successful commit: keep track of the latest "last_modified" for each file
278
+ for file in files_to_upload:
279
+ self.last_uploaded[file.local_path] = file.last_modified
280
+ return commit_info
281
+
282
+
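The `push_to_hub` docstring above suggests subclassing `CommitScheduler` to change what gets committed. A minimal sketch of such a subclass, assuming placeholder repo and folder names, that zips the watched folder and uploads a single archive instead of individual files:

```python
# Hypothetical sketch: a CommitScheduler subclass that archives the watched folder
# into one zip file before uploading. Repo id and folder name are placeholders.
import tempfile
from pathlib import Path
from zipfile import ZipFile

from huggingface_hub import CommitScheduler


class ZipScheduler(CommitScheduler):
    def push_to_hub(self):
        # Compress the whole folder into a temporary archive...
        with tempfile.TemporaryDirectory() as tmp:
            archive_path = Path(tmp) / "data.zip"
            with ZipFile(archive_path, "w") as archive:
                for filepath in self.folder_path.rglob("*"):
                    if filepath.is_file():
                        archive.write(filepath, arcname=filepath.relative_to(self.folder_path))
            # ...then upload the archive as a single file.
            return self.api.upload_file(
                path_or_fileobj=archive_path,
                path_in_repo="data.zip",
                repo_id=self.repo_id,
                repo_type=self.repo_type,
                revision=self.revision,
            )


scheduler = ZipScheduler(repo_id="username/zipped-dataset", repo_type="dataset", folder_path="watched_folder")
```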
283
+ class PartialFileIO(BytesIO):
284
+ """A file-like object that reads only the first part of a file.
285
+
286
+ Useful to upload a file to the Hub when the user might still be appending data to it. Only the first part of the
287
+ file is uploaded (i.e. the part that was available when the filesystem was first scanned).
288
+
289
+ In practice, only used internally by the CommitScheduler to regularly push a folder to the Hub with minimal
290
+ disturbance for the user. The object is passed to `CommitOperationAdd`.
291
+
292
+ Only supports `read`, `tell` and `seek` methods.
293
+
294
+ Args:
295
+ file_path (`str` or `Path`):
296
+ Path to the file to read.
297
+ size_limit (`int`):
298
+ The maximum number of bytes to read from the file. If the file is larger than this, only the first part
299
+ will be read (and uploaded).
300
+ """
301
+
302
+ def __init__(self, file_path: Union[str, Path], size_limit: int) -> None:
303
+ self._file_path = Path(file_path)
304
+ self._file = self._file_path.open("rb")
305
+ self._size_limit = min(size_limit, os.fstat(self._file.fileno()).st_size)
306
+
307
+ def __del__(self) -> None:
308
+ self._file.close()
309
+ return super().__del__()
310
+
311
+ def __repr__(self) -> str:
312
+ return f"<PartialFileIO file_path={self._file_path} size_limit={self._size_limit}>"
313
+
314
+ def __len__(self) -> int:
315
+ return self._size_limit
316
+
317
+ def __getattribute__(self, name: str):
318
+ if name.startswith("_") or name in ("read", "tell", "seek"): # only 3 public methods supported
319
+ return super().__getattribute__(name)
320
+ raise NotImplementedError(f"PartialFileIO does not support '{name}'.")
321
+
322
+ def tell(self) -> int:
323
+ """Return the current file position."""
324
+ return self._file.tell()
325
+
326
+ def seek(self, __offset: int, __whence: int = SEEK_SET) -> int:
327
+ """Change the stream position to the given offset.
328
+
329
+ Behavior is the same as a regular file, except that the position is capped to the size limit.
330
+ """
331
+ if __whence == SEEK_END:
332
+ # SEEK_END => set from the truncated end
333
+ __offset = len(self) + __offset
334
+ __whence = SEEK_SET
335
+
336
+ pos = self._file.seek(__offset, __whence)
337
+ if pos > self._size_limit:
338
+ return self._file.seek(self._size_limit)
339
+ return pos
340
+
341
+ def read(self, __size: Optional[int] = -1) -> bytes:
342
+ """Read at most `__size` bytes from the file.
343
+
344
+ Behavior is the same as a regular file, except that it is capped to the size limit.
345
+ """
346
+ current = self._file.tell()
347
+ if __size is None or __size < 0:
348
+ # Read until file limit
349
+ truncated_size = self._size_limit - current
350
+ else:
351
+ # Read until file limit or __size
352
+ truncated_size = min(__size, self._size_limit - current)
353
+ return self._file.read(truncated_size)
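A small sketch of the truncation behaviour, using a throwaway file: bytes appended after `PartialFileIO` is created (i.e. after the scheduler scanned the folder) are never exposed to the upload machinery.

```python
# Hypothetical sketch: demonstrate that PartialFileIO caps reads at size_limit.
# The file path is a placeholder.
from pathlib import Path

from huggingface_hub._commit_scheduler import PartialFileIO

path = Path("growing.log")
path.write_bytes(b"first line\n")       # 11 bytes present when the folder was "scanned"
partial = PartialFileIO(path, size_limit=11)

# Data appended afterwards is invisible to the reader.
with path.open("ab") as f:
    f.write(b"second line\n")

print(len(partial))    # 11
print(partial.read())  # b'first line\n' (the appended bytes are not returned)
```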
venv/lib/python3.13/site-packages/huggingface_hub/_inference_endpoints.py ADDED
@@ -0,0 +1,413 @@
1
+ import time
2
+ from dataclasses import dataclass, field
3
+ from datetime import datetime
4
+ from enum import Enum
5
+ from typing import TYPE_CHECKING, Dict, Optional, Union
6
+
7
+ from huggingface_hub.errors import InferenceEndpointError, InferenceEndpointTimeoutError
8
+
9
+ from .utils import get_session, logging, parse_datetime
10
+
11
+
12
+ if TYPE_CHECKING:
13
+ from .hf_api import HfApi
14
+ from .inference._client import InferenceClient
15
+ from .inference._generated._async_client import AsyncInferenceClient
16
+
17
+ logger = logging.get_logger(__name__)
18
+
19
+
20
+ class InferenceEndpointStatus(str, Enum):
21
+ PENDING = "pending"
22
+ INITIALIZING = "initializing"
23
+ UPDATING = "updating"
24
+ UPDATE_FAILED = "updateFailed"
25
+ RUNNING = "running"
26
+ PAUSED = "paused"
27
+ FAILED = "failed"
28
+ SCALED_TO_ZERO = "scaledToZero"
29
+
30
+
31
+ class InferenceEndpointType(str, Enum):
32
+ PUBLIC = "public"
33
+ PROTECTED = "protected"
34
+ PRIVATE = "private"
35
+
36
+
37
+ @dataclass
38
+ class InferenceEndpoint:
39
+ """
40
+ Contains information about a deployed Inference Endpoint.
41
+
42
+ Args:
43
+ name (`str`):
44
+ The unique name of the Inference Endpoint.
45
+ namespace (`str`):
46
+ The namespace where the Inference Endpoint is located.
47
+ repository (`str`):
48
+ The name of the model repository deployed on this Inference Endpoint.
49
+ status ([`InferenceEndpointStatus`]):
50
+ The current status of the Inference Endpoint.
51
+ url (`str`, *optional*):
52
+ The URL of the Inference Endpoint, if available. Only a deployed Inference Endpoint will have a URL.
53
+ framework (`str`):
54
+ The machine learning framework used for the model.
55
+ revision (`str`):
56
+ The specific model revision deployed on the Inference Endpoint.
57
+ task (`str`):
58
+ The task associated with the deployed model.
59
+ created_at (`datetime.datetime`):
60
+ The timestamp when the Inference Endpoint was created.
61
+ updated_at (`datetime.datetime`):
62
+ The timestamp of the last update of the Inference Endpoint.
63
+ type ([`InferenceEndpointType`]):
64
+ The type of the Inference Endpoint (public, protected, private).
65
+ raw (`Dict`):
66
+ The raw dictionary data returned from the API.
67
+ token (`str` or `bool`, *optional*):
68
+ Authentication token for the Inference Endpoint, if set when requesting the API. Will default to the
69
+ locally saved token if not provided. Pass `token=False` if you don't want to send your token to the server.
70
+
71
+ Example:
72
+ ```python
73
+ >>> from huggingface_hub import get_inference_endpoint
74
+ >>> endpoint = get_inference_endpoint("my-text-to-image")
75
+ >>> endpoint
76
+ InferenceEndpoint(name='my-text-to-image', ...)
77
+
78
+ # Get status
79
+ >>> endpoint.status
80
+ 'running'
81
+ >>> endpoint.url
82
+ 'https://my-text-to-image.region.vendor.endpoints.huggingface.cloud'
83
+
84
+ # Run inference
85
+ >>> endpoint.client.text_to_image(...)
86
+
87
+ # Pause endpoint to save $$$
88
+ >>> endpoint.pause()
89
+
90
+ # ...
91
+ # Resume and wait for deployment
92
+ >>> endpoint.resume()
93
+ >>> endpoint.wait()
94
+ >>> endpoint.client.text_to_image(...)
95
+ ```
96
+ """
97
+
98
+ # Field in __repr__
99
+ name: str = field(init=False)
100
+ namespace: str
101
+ repository: str = field(init=False)
102
+ status: InferenceEndpointStatus = field(init=False)
103
+ health_route: str = field(init=False)
104
+ url: Optional[str] = field(init=False)
105
+
106
+ # Other fields
107
+ framework: str = field(repr=False, init=False)
108
+ revision: str = field(repr=False, init=False)
109
+ task: str = field(repr=False, init=False)
110
+ created_at: datetime = field(repr=False, init=False)
111
+ updated_at: datetime = field(repr=False, init=False)
112
+ type: InferenceEndpointType = field(repr=False, init=False)
113
+
114
+ # Raw dict from the API
115
+ raw: Dict = field(repr=False)
116
+
117
+ # Internal fields
118
+ _token: Union[str, bool, None] = field(repr=False, compare=False)
119
+ _api: "HfApi" = field(repr=False, compare=False)
120
+
121
+ @classmethod
122
+ def from_raw(
123
+ cls, raw: Dict, namespace: str, token: Union[str, bool, None] = None, api: Optional["HfApi"] = None
124
+ ) -> "InferenceEndpoint":
125
+ """Initialize object from raw dictionary."""
126
+ if api is None:
127
+ from .hf_api import HfApi
128
+
129
+ api = HfApi()
130
+ if token is None:
131
+ token = api.token
132
+
133
+ # All other fields are populated in __post_init__
134
+ return cls(raw=raw, namespace=namespace, _token=token, _api=api)
135
+
136
+ def __post_init__(self) -> None:
137
+ """Populate fields from raw dictionary."""
138
+ self._populate_from_raw()
139
+
140
+ @property
141
+ def client(self) -> "InferenceClient":
142
+ """Returns a client to make predictions on this Inference Endpoint.
143
+
144
+ Returns:
145
+ [`InferenceClient`]: an inference client pointing to the deployed endpoint.
146
+
147
+ Raises:
148
+ [`InferenceEndpointError`]: If the Inference Endpoint is not yet deployed.
149
+ """
150
+ if self.url is None:
151
+ raise InferenceEndpointError(
152
+ "Cannot create a client for this Inference Endpoint as it is not yet deployed. "
153
+ "Please wait for the Inference Endpoint to be deployed using `endpoint.wait()` and try again."
154
+ )
155
+ from .inference._client import InferenceClient
156
+
157
+ return InferenceClient(
158
+ model=self.url,
159
+ token=self._token, # type: ignore[arg-type] # boolean token shouldn't be possible. In practice it's ok.
160
+ )
161
+
162
+ @property
163
+ def async_client(self) -> "AsyncInferenceClient":
164
+ """Returns a client to make predictions on this Inference Endpoint.
165
+
166
+ Returns:
167
+ [`AsyncInferenceClient`]: an asyncio-compatible inference client pointing to the deployed endpoint.
168
+
169
+ Raises:
170
+ [`InferenceEndpointError`]: If the Inference Endpoint is not yet deployed.
171
+ """
172
+ if self.url is None:
173
+ raise InferenceEndpointError(
174
+ "Cannot create a client for this Inference Endpoint as it is not yet deployed. "
175
+ "Please wait for the Inference Endpoint to be deployed using `endpoint.wait()` and try again."
176
+ )
177
+ from .inference._generated._async_client import AsyncInferenceClient
178
+
179
+ return AsyncInferenceClient(
180
+ model=self.url,
181
+ token=self._token, # type: ignore[arg-type] # boolean token shouldn't be possible. In practice it's ok.
182
+ )
183
+
184
+ def wait(self, timeout: Optional[int] = None, refresh_every: int = 5) -> "InferenceEndpoint":
185
+ """Wait for the Inference Endpoint to be deployed.
186
+
187
+ Information from the server will be fetched every 1s. If the Inference Endpoint is not deployed after `timeout`
188
+ seconds, a [`InferenceEndpointTimeoutError`] will be raised. The [`InferenceEndpoint`] will be mutated in place with the latest
189
+ data.
190
+
191
+ Args:
192
+ timeout (`int`, *optional*):
193
+ The maximum time to wait for the Inference Endpoint to be deployed, in seconds. If `None`, will wait
194
+ indefinitely.
195
+ refresh_every (`int`, *optional*):
196
+ The time to wait between each fetch of the Inference Endpoint status, in seconds. Defaults to 5s.
197
+
198
+ Returns:
199
+ [`InferenceEndpoint`]: the same Inference Endpoint, mutated in place with the latest data.
200
+
201
+ Raises:
202
+ [`InferenceEndpointError`]
203
+ If the Inference Endpoint ended up in a failed state.
204
+ [`InferenceEndpointTimeoutError`]
205
+ If the Inference Endpoint is not deployed after `timeout` seconds.
206
+ """
207
+ if timeout is not None and timeout < 0:
208
+ raise ValueError("`timeout` cannot be negative.")
209
+ if refresh_every <= 0:
210
+ raise ValueError("`refresh_every` must be positive.")
211
+
212
+ start = time.time()
213
+ while True:
214
+ if self.status == InferenceEndpointStatus.FAILED:
215
+ raise InferenceEndpointError(
216
+ f"Inference Endpoint {self.name} failed to deploy. Please check the logs for more information."
217
+ )
218
+ if self.status == InferenceEndpointStatus.UPDATE_FAILED:
219
+ raise InferenceEndpointError(
220
+ f"Inference Endpoint {self.name} failed to update. Please check the logs for more information."
221
+ )
222
+ if self.status == InferenceEndpointStatus.RUNNING and self.url is not None:
223
+ # Verify the endpoint is actually reachable
224
+ _health_url = f"{self.url.rstrip('/')}/{self.health_route.lstrip('/')}"
225
+ response = get_session().get(_health_url, headers=self._api._build_hf_headers(token=self._token))
226
+ if response.status_code == 200:
227
+ logger.info("Inference Endpoint is ready to be used.")
228
+ return self
229
+
230
+ if timeout is not None:
231
+ if time.time() - start > timeout:
232
+ raise InferenceEndpointTimeoutError("Timeout while waiting for Inference Endpoint to be deployed.")
233
+ logger.info(f"Inference Endpoint is not deployed yet ({self.status}). Waiting {refresh_every}s...")
234
+ time.sleep(refresh_every)
235
+ self.fetch()
236
+
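A short sketch of a guarded wait, assuming a placeholder endpoint name: resume the endpoint, poll its status with a bounded timeout, and fall back gracefully if it is still starting.

```python
# Hypothetical sketch: resume an endpoint and wait for it with a bounded timeout.
# The endpoint name is a placeholder.
from huggingface_hub import get_inference_endpoint
from huggingface_hub.errors import InferenceEndpointTimeoutError

endpoint = get_inference_endpoint("my-text-to-image")
endpoint.resume()
try:
    endpoint.wait(timeout=300, refresh_every=10)  # poll every 10s, give up after 5 minutes
except InferenceEndpointTimeoutError:
    print("Endpoint is still starting; try again later.")
else:
    image = endpoint.client.text_to_image("a cozy cabin in the woods")
```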
237
+ def fetch(self) -> "InferenceEndpoint":
238
+ """Fetch latest information about the Inference Endpoint.
239
+
240
+ Returns:
241
+ [`InferenceEndpoint`]: the same Inference Endpoint, mutated in place with the latest data.
242
+ """
243
+ obj = self._api.get_inference_endpoint(name=self.name, namespace=self.namespace, token=self._token) # type: ignore [arg-type]
244
+ self.raw = obj.raw
245
+ self._populate_from_raw()
246
+ return self
247
+
248
+ def update(
249
+ self,
250
+ *,
251
+ # Compute update
252
+ accelerator: Optional[str] = None,
253
+ instance_size: Optional[str] = None,
254
+ instance_type: Optional[str] = None,
255
+ min_replica: Optional[int] = None,
256
+ max_replica: Optional[int] = None,
257
+ scale_to_zero_timeout: Optional[int] = None,
258
+ # Model update
259
+ repository: Optional[str] = None,
260
+ framework: Optional[str] = None,
261
+ revision: Optional[str] = None,
262
+ task: Optional[str] = None,
263
+ custom_image: Optional[Dict] = None,
264
+ secrets: Optional[Dict[str, str]] = None,
265
+ ) -> "InferenceEndpoint":
266
+ """Update the Inference Endpoint.
267
+
268
+ This method allows the update of either the compute configuration, the deployed model, or both. All arguments are
269
+ optional but at least one must be provided.
270
+
271
+ This is an alias for [`HfApi.update_inference_endpoint`]. The current object is mutated in place with the
272
+ latest data from the server.
273
+
274
+ Args:
275
+ accelerator (`str`, *optional*):
276
+ The hardware accelerator to be used for inference (e.g. `"cpu"`).
277
+ instance_size (`str`, *optional*):
278
+ The size or type of the instance to be used for hosting the model (e.g. `"x4"`).
279
+ instance_type (`str`, *optional*):
280
+ The cloud instance type where the Inference Endpoint will be deployed (e.g. `"intel-icl"`).
281
+ min_replica (`int`, *optional*):
282
+ The minimum number of replicas (instances) to keep running for the Inference Endpoint.
283
+ max_replica (`int`, *optional*):
284
+ The maximum number of replicas (instances) to scale to for the Inference Endpoint.
285
+ scale_to_zero_timeout (`int`, *optional*):
286
+ The duration in minutes before an inactive endpoint is scaled to zero.
287
+
288
+ repository (`str`, *optional*):
289
+ The name of the model repository associated with the Inference Endpoint (e.g. `"gpt2"`).
290
+ framework (`str`, *optional*):
291
+ The machine learning framework used for the model (e.g. `"custom"`).
292
+ revision (`str`, *optional*):
293
+ The specific model revision to deploy on the Inference Endpoint (e.g. `"6c0e6080953db56375760c0471a8c5f2929baf11"`).
294
+ task (`str`, *optional*):
295
+ The task on which to deploy the model (e.g. `"text-classification"`).
296
+ custom_image (`Dict`, *optional*):
297
+ A custom Docker image to use for the Inference Endpoint. This is useful if you want to deploy an
298
+ Inference Endpoint running on the `text-generation-inference` (TGI) framework (see examples).
299
+ secrets (`Dict[str, str]`, *optional*):
300
+ Secret values to inject in the container environment.
301
+ Returns:
302
+ [`InferenceEndpoint`]: the same Inference Endpoint, mutated in place with the latest data.
303
+ """
304
+ # Make API call
305
+ obj = self._api.update_inference_endpoint(
306
+ name=self.name,
307
+ namespace=self.namespace,
308
+ accelerator=accelerator,
309
+ instance_size=instance_size,
310
+ instance_type=instance_type,
311
+ min_replica=min_replica,
312
+ max_replica=max_replica,
313
+ scale_to_zero_timeout=scale_to_zero_timeout,
314
+ repository=repository,
315
+ framework=framework,
316
+ revision=revision,
317
+ task=task,
318
+ custom_image=custom_image,
319
+ secrets=secrets,
320
+ token=self._token, # type: ignore [arg-type]
321
+ )
322
+
323
+ # Mutate current object
324
+ self.raw = obj.raw
325
+ self._populate_from_raw()
326
+ return self
327
+
328
+ def pause(self) -> "InferenceEndpoint":
329
+ """Pause the Inference Endpoint.
330
+
331
+ A paused Inference Endpoint will not be charged. It can be resumed at any time using [`InferenceEndpoint.resume`].
332
+ This is different than scaling the Inference Endpoint to zero with [`InferenceEndpoint.scale_to_zero`], which
333
+ would be automatically restarted when a request is made to it.
334
+
335
+ This is an alias for [`HfApi.pause_inference_endpoint`]. The current object is mutated in place with the
336
+ latest data from the server.
337
+
338
+ Returns:
339
+ [`InferenceEndpoint`]: the same Inference Endpoint, mutated in place with the latest data.
340
+ """
341
+ obj = self._api.pause_inference_endpoint(name=self.name, namespace=self.namespace, token=self._token) # type: ignore [arg-type]
342
+ self.raw = obj.raw
343
+ self._populate_from_raw()
344
+ return self
345
+
346
+ def resume(self, running_ok: bool = True) -> "InferenceEndpoint":
347
+ """Resume the Inference Endpoint.
348
+
349
+ This is an alias for [`HfApi.resume_inference_endpoint`]. The current object is mutated in place with the
350
+ latest data from the server.
351
+
352
+ Args:
353
+ running_ok (`bool`, *optional*):
354
+ If `True`, the method will not raise an error if the Inference Endpoint is already running. Defaults to
355
+ `True`.
356
+
357
+ Returns:
358
+ [`InferenceEndpoint`]: the same Inference Endpoint, mutated in place with the latest data.
359
+ """
360
+ obj = self._api.resume_inference_endpoint(
361
+ name=self.name, namespace=self.namespace, running_ok=running_ok, token=self._token
362
+ ) # type: ignore [arg-type]
363
+ self.raw = obj.raw
364
+ self._populate_from_raw()
365
+ return self
366
+
367
+ def scale_to_zero(self) -> "InferenceEndpoint":
368
+ """Scale Inference Endpoint to zero.
369
+
370
+ An Inference Endpoint scaled to zero will not be charged. It will be resumed automatically on the next request to it, with a
371
+ cold start delay. This is different than pausing the Inference Endpoint with [`InferenceEndpoint.pause`], which
372
+ would require a manual resume with [`InferenceEndpoint.resume`].
373
+
374
+ This is an alias for [`HfApi.scale_to_zero_inference_endpoint`]. The current object is mutated in place with the
375
+ latest data from the server.
376
+
377
+ Returns:
378
+ [`InferenceEndpoint`]: the same Inference Endpoint, mutated in place with the latest data.
379
+ """
380
+ obj = self._api.scale_to_zero_inference_endpoint(name=self.name, namespace=self.namespace, token=self._token) # type: ignore [arg-type]
381
+ self.raw = obj.raw
382
+ self._populate_from_raw()
383
+ return self
384
+
385
+ def delete(self) -> None:
386
+ """Delete the Inference Endpoint.
387
+
388
+ This operation is not reversible. If you don't want to be charged for an Inference Endpoint, it is preferable
389
+ to pause it with [`InferenceEndpoint.pause`] or scale it to zero with [`InferenceEndpoint.scale_to_zero`].
390
+
391
+ This is an alias for [`HfApi.delete_inference_endpoint`].
392
+ """
393
+ self._api.delete_inference_endpoint(name=self.name, namespace=self.namespace, token=self._token) # type: ignore [arg-type]
394
+
395
+ def _populate_from_raw(self) -> None:
396
+ """Populate fields from raw dictionary.
397
+
398
+ Called in __post_init__ + each time the Inference Endpoint is updated.
399
+ """
400
+ # Repr fields
401
+ self.name = self.raw["name"]
402
+ self.repository = self.raw["model"]["repository"]
403
+ self.status = self.raw["status"]["state"]
404
+ self.url = self.raw["status"].get("url")
405
+ self.health_route = self.raw["healthRoute"]
406
+
407
+ # Other fields
408
+ self.framework = self.raw["model"]["framework"]
409
+ self.revision = self.raw["model"]["revision"]
410
+ self.task = self.raw["model"]["task"]
411
+ self.created_at = parse_datetime(self.raw["status"]["createdAt"])
412
+ self.updated_at = parse_datetime(self.raw["status"]["updatedAt"])
413
+ self.type = self.raw["type"]
venv/lib/python3.13/site-packages/huggingface_hub/_jobs_api.py ADDED
@@ -0,0 +1,301 @@
1
+ # coding=utf-8
2
+ # Copyright 2025-present, the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ from dataclasses import dataclass
16
+ from datetime import datetime
17
+ from enum import Enum
18
+ from typing import Any, Dict, List, Optional, Union
19
+
20
+ from huggingface_hub import constants
21
+ from huggingface_hub._space_api import SpaceHardware
22
+ from huggingface_hub.utils._datetime import parse_datetime
23
+
24
+
25
+ class JobStage(str, Enum):
26
+ """
27
+ Enumeration of possible stage of a Job on the Hub.
28
+
29
+ Value can be compared to a string:
30
+ ```py
31
+ assert JobStage.COMPLETED == "COMPLETED"
32
+ ```
33
+
34
+ Taken from https://github.com/huggingface/moon-landing/blob/main/server/job_types/JobInfo.ts#L61 (private url).
35
+ """
36
+
37
+ # Copied from moon-landing > server > lib > Job.ts
38
+ COMPLETED = "COMPLETED"
39
+ CANCELED = "CANCELED"
40
+ ERROR = "ERROR"
41
+ DELETED = "DELETED"
42
+ RUNNING = "RUNNING"
43
+
44
+
45
+ @dataclass
46
+ class JobStatus:
47
+ stage: JobStage
48
+ message: Optional[str]
49
+
50
+
51
+ @dataclass
52
+ class JobOwner:
53
+ id: str
54
+ name: str
55
+ type: str
56
+
57
+
58
+ @dataclass
59
+ class JobInfo:
60
+ """
61
+ Contains information about a Job.
62
+
63
+ Args:
64
+ id (`str`):
65
+ Job ID.
66
+ created_at (`datetime` or `None`):
67
+ When the Job was created.
68
+ docker_image (`str` or `None`):
69
+ The Docker image from Docker Hub used for the Job.
70
+ Can be None if space_id is present instead.
71
+ space_id (`str` or `None`):
72
+ The Docker image from Hugging Face Spaces used for the Job.
73
+ Can be None if docker_image is present instead.
74
+ command (`List[str]` or `None`):
75
+ Command of the Job, e.g. `["python", "-c", "print('hello world')"]`
76
+ arguments (`List[str]` or `None`):
77
+ Arguments passed to the command
78
+ environment (`Dict[str, Any]` or `None`):
79
+ Environment variables of the Job as a dictionary.
80
+ secrets (`Dict[str, Any]` or `None`):
81
+ Secret environment variables of the Job (encrypted).
82
+ flavor (`str` or `None`):
83
+ Flavor for the hardware, as in Hugging Face Spaces. See [`SpaceHardware`] for possible values.
84
+ E.g. `"cpu-basic"`.
85
+ status (`JobStatus` or `None`):
86
+ Status of the Job, e.g. `JobStatus(stage="RUNNING", message=None)`
87
+ See [`JobStage`] for possible stage values.
88
+ owner (`JobOwner` or `None`):
89
+ Owner of the Job, e.g. `JobOwner(id="5e9ecfc04957053f60648a3e", name="lhoestq", type="user")`
90
+
91
+ Example:
92
+
93
+ ```python
94
+ >>> from huggingface_hub import run_job
95
+ >>> job = run_job(
96
+ ... image="python:3.12",
97
+ ... command=["python", "-c", "print('Hello from the cloud!')"]
98
+ ... )
99
+ >>> job
100
+ JobInfo(id='687fb701029421ae5549d998', created_at=datetime.datetime(2025, 7, 22, 16, 6, 25, 79000, tzinfo=datetime.timezone.utc), docker_image='python:3.12', space_id=None, command=['python', '-c', "print('Hello from the cloud!')"], arguments=[], environment={}, secrets={}, flavor='cpu-basic', status=JobStatus(stage='RUNNING', message=None), owner=JobOwner(id='5e9ecfc04957053f60648a3e', name='lhoestq', type='user'), endpoint='https://huggingface.co', url='https://huggingface.co/jobs/lhoestq/687fb701029421ae5549d998')
101
+ >>> job.id
102
+ '687fb701029421ae5549d998'
103
+ >>> job.url
104
+ 'https://huggingface.co/jobs/lhoestq/687fb701029421ae5549d998'
105
+ >>> job.status.stage
106
+ 'RUNNING'
107
+ ```
108
+ """
109
+
110
+ id: str
111
+ created_at: Optional[datetime]
112
+ docker_image: Optional[str]
113
+ space_id: Optional[str]
114
+ command: Optional[List[str]]
115
+ arguments: Optional[List[str]]
116
+ environment: Optional[Dict[str, Any]]
117
+ secrets: Optional[Dict[str, Any]]
118
+ flavor: Optional[SpaceHardware]
119
+ status: JobStatus
120
+ owner: JobOwner
121
+
122
+ # Inferred fields
123
+ endpoint: str
124
+ url: str
125
+
126
+ def __init__(self, **kwargs) -> None:
127
+ self.id = kwargs["id"]
128
+ created_at = kwargs.get("createdAt") or kwargs.get("created_at")
129
+ self.created_at = parse_datetime(created_at) if created_at else None
130
+ self.docker_image = kwargs.get("dockerImage") or kwargs.get("docker_image")
131
+ self.space_id = kwargs.get("spaceId") or kwargs.get("space_id")
132
+ owner = kwargs.get("owner", {})
133
+ self.owner = JobOwner(id=owner["id"], name=owner["name"], type=owner["type"])
134
+ self.command = kwargs.get("command")
135
+ self.arguments = kwargs.get("arguments")
136
+ self.environment = kwargs.get("environment")
137
+ self.secrets = kwargs.get("secrets")
138
+ self.flavor = kwargs.get("flavor")
139
+ status = kwargs.get("status", {})
140
+ self.status = JobStatus(stage=status["stage"], message=status.get("message"))
141
+
142
+ # Inferred fields
143
+ self.endpoint = kwargs.get("endpoint", constants.ENDPOINT)
144
+ self.url = f"{self.endpoint}/jobs/{self.owner.name}/{self.id}"
145
+
146
+
147
+ @dataclass
148
+ class JobSpec:
149
+ docker_image: Optional[str]
150
+ space_id: Optional[str]
151
+ command: Optional[List[str]]
152
+ arguments: Optional[List[str]]
153
+ environment: Optional[Dict[str, Any]]
154
+ secrets: Optional[Dict[str, Any]]
155
+ flavor: Optional[SpaceHardware]
156
+ timeout: Optional[int]
157
+ tags: Optional[List[str]]
158
+ arch: Optional[str]
159
+
160
+ def __init__(self, **kwargs) -> None:
161
+ self.docker_image = kwargs.get("dockerImage") or kwargs.get("docker_image")
162
+ self.space_id = kwargs.get("spaceId") or kwargs.get("space_id")
163
+ self.command = kwargs.get("command")
164
+ self.arguments = kwargs.get("arguments")
165
+ self.environment = kwargs.get("environment")
166
+ self.secrets = kwargs.get("secrets")
167
+ self.flavor = kwargs.get("flavor")
168
+ self.timeout = kwargs.get("timeout")
169
+ self.tags = kwargs.get("tags")
170
+ self.arch = kwargs.get("arch")
171
+
172
+
173
+ @dataclass
174
+ class LastJobInfo:
175
+ id: str
176
+ at: datetime
177
+
178
+ def __init__(self, **kwargs) -> None:
179
+ self.id = kwargs["id"]
180
+ self.at = parse_datetime(kwargs["at"])
181
+
182
+
183
+ @dataclass
184
+ class ScheduledJobStatus:
185
+ last_job: Optional[LastJobInfo]
186
+ next_job_run_at: Optional[datetime]
187
+
188
+ def __init__(self, **kwargs) -> None:
189
+ last_job = kwargs.get("lastJob") or kwargs.get("last_job")
190
+ self.last_job = LastJobInfo(**last_job) if last_job else None
191
+ next_job_run_at = kwargs.get("nextJobRunAt") or kwargs.get("next_job_run_at")
192
+ self.next_job_run_at = parse_datetime(str(next_job_run_at)) if next_job_run_at else None
193
+
194
+
195
+ @dataclass
196
+ class ScheduledJobInfo:
197
+ """
198
+ Contains information about a scheduled Job.
199
+
200
+ Args:
201
+ id (`str`):
202
+ Scheduled Job ID.
203
+ created_at (`datetime` or `None`):
204
+ When the scheduled Job was created.
205
+ tags (`List[str]` or `None`):
206
+ The tags of the scheduled Job.
207
+ schedule (`str` or `None`):
208
+ One of "@annually", "@yearly", "@monthly", "@weekly", "@daily", "@hourly", or a
209
+ CRON schedule expression (e.g., '0 9 * * 1' for 9 AM every Monday).
210
+ suspend (`bool` or `None`):
211
+ Whether the scheduled job is suspended (paused).
212
+ concurrency (`bool` or `None`):
213
+ Whether multiple instances of this Job can run concurrently.
214
+ status (`ScheduledJobStatus` or `None`):
215
+ Status of the scheduled Job.
216
+ owner (`JobOwner` or `None`):
217
+ Owner of the scheduled Job, e.g. `JobOwner(id="5e9ecfc04957053f60648a3e", name="lhoestq", type="user")`
218
+ job_spec (`JobSpec` or `None`):
219
+ Specifications of the Job.
220
+
221
+ Example:
222
+
223
+ ```python
224
+ >>> from huggingface_hub import run_job
225
+ >>> scheduled_job = create_scheduled_job(
226
+ ... image="python:3.12",
227
+ ... command=["python", "-c", "print('Hello from the cloud!')"],
228
+ ... schedule="@hourly",
229
+ ... )
230
+ >>> scheduled_job.id
231
+ '687fb701029421ae5549d999'
232
+ >>> scheduled_job.status.next_job_run_at
233
+ datetime.datetime(2025, 7, 22, 17, 6, 25, 79000, tzinfo=datetime.timezone.utc)
234
+ ```
235
+ """
236
+
237
+ id: str
238
+ created_at: Optional[datetime]
239
+ job_spec: JobSpec
240
+ schedule: Optional[str]
241
+ suspend: Optional[bool]
242
+ concurrency: Optional[bool]
243
+ status: ScheduledJobStatus
244
+ owner: JobOwner
245
+
246
+ def __init__(self, **kwargs) -> None:
247
+ self.id = kwargs["id"]
248
+ created_at = kwargs.get("createdAt") or kwargs.get("created_at")
249
+ self.created_at = parse_datetime(created_at) if created_at else None
250
+ self.job_spec = JobSpec(**(kwargs.get("job_spec") or kwargs.get("jobSpec", {})))
251
+ self.schedule = kwargs.get("schedule")
252
+ self.suspend = kwargs.get("suspend")
253
+ self.concurrency = kwargs.get("concurrency")
254
+ status = kwargs.get("status", {})
255
+ self.status = ScheduledJobStatus(
256
+ last_job=status.get("last_job") or status.get("lastJob"),
257
+ next_job_run_at=status.get("next_job_run_at") or status.get("nextJobRunAt"),
258
+ )
259
+ owner = kwargs.get("owner", {})
260
+ self.owner = JobOwner(id=owner["id"], name=owner["name"], type=owner["type"])
261
+
262
+
263
+ def _create_job_spec(
264
+ *,
265
+ image: str,
266
+ command: List[str],
267
+ env: Optional[Dict[str, Any]],
268
+ secrets: Optional[Dict[str, Any]],
269
+ flavor: Optional[SpaceHardware],
270
+ timeout: Optional[Union[int, float, str]],
271
+ ) -> Dict[str, Any]:
272
+ # prepare job spec to send to HF Jobs API
273
+ job_spec: Dict[str, Any] = {
274
+ "command": command,
275
+ "arguments": [],
276
+ "environment": env or {},
277
+ "flavor": flavor or SpaceHardware.CPU_BASIC,
278
+ }
279
+ # secrets are optional
280
+ if secrets:
281
+ job_spec["secrets"] = secrets
282
+ # timeout is optional
283
+ if timeout:
284
+ time_units_factors = {"s": 1, "m": 60, "h": 3600, "d": 3600 * 24}
285
+ if isinstance(timeout, str) and timeout[-1] in time_units_factors:
286
+ job_spec["timeoutSeconds"] = int(float(timeout[:-1]) * time_units_factors[timeout[-1]])
287
+ else:
288
+ job_spec["timeoutSeconds"] = int(timeout)
289
+ # input is either from docker hub or from HF spaces
290
+ for prefix in (
291
+ "https://huggingface.co/spaces/",
292
+ "https://hf.co/spaces/",
293
+ "huggingface.co/spaces/",
294
+ "hf.co/spaces/",
295
+ ):
296
+ if image.startswith(prefix):
297
+ job_spec["spaceId"] = image[len(prefix) :]
298
+ break
299
+ else:
300
+ job_spec["dockerImage"] = image
301
+ return job_spec
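A minimal sketch of the two resolution rules above, using placeholder image names: a Spaces URL becomes a `spaceId`, anything else is treated as a Docker Hub image, and string timeouts with a unit suffix are converted to seconds.

```python
# Hypothetical sketch of _create_job_spec behaviour; image names and the Space id
# below are placeholders.
from huggingface_hub._jobs_api import _create_job_spec

spec_docker = _create_job_spec(
    image="python:3.12",
    command=["python", "-c", "print('hi')"],
    env=None, secrets=None, flavor=None,
    timeout="30m",  # "30m" -> 1800 seconds
)
assert spec_docker["dockerImage"] == "python:3.12"
assert spec_docker["timeoutSeconds"] == 1800

spec_space = _create_job_spec(
    image="https://huggingface.co/spaces/lhoestq/duckdb",
    command=["duckdb"],
    env=None, secrets=None, flavor=None,
    timeout=None,
)
assert spec_space["spaceId"] == "lhoestq/duckdb"
assert "dockerImage" not in spec_space
```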
venv/lib/python3.13/site-packages/huggingface_hub/_local_folder.py ADDED
@@ -0,0 +1,447 @@
1
+ # coding=utf-8
2
+ # Copyright 2024-present, the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Contains utilities to handle the `../.cache/huggingface` folder in local directories.
16
+
17
+ First discussed in https://github.com/huggingface/huggingface_hub/issues/1738 to store
18
+ download metadata when downloading files from the hub to a local directory (without
19
+ using the cache).
20
+
21
+ ./.cache/huggingface folder structure:
22
+ [4.0K] data
23
+ ├── [4.0K] .cache
24
+ │ └── [4.0K] huggingface
25
+ │ └── [4.0K] download
26
+ │ ├── [ 16] file.parquet.metadata
27
+ │ ├── [ 16] file.txt.metadata
28
+ │ └── [4.0K] folder
29
+ │ └── [ 16] file.parquet.metadata
30
+
31
+ ├── [6.5G] file.parquet
32
+ ├── [1.5K] file.txt
33
+ └── [4.0K] folder
34
+ └── [ 16] file.parquet
35
+
36
+
37
+ Download metadata file structure:
38
+ ```
39
+ # file.txt.metadata
40
+ 11c5a3d5811f50298f278a704980280950aedb10
41
+ a16a55fda99d2f2e7b69cce5cf93ff4ad3049930
42
+ 1712656091.123
43
+
44
+ # file.parquet.metadata
45
+ 11c5a3d5811f50298f278a704980280950aedb10
46
+ 7c5d3f4b8b76583b422fcb9189ad6c89d5d97a094541ce8932dce3ecabde1421
47
+ 1712656091.123
48
+
49
+ ```
50
+ """
51
+
52
+ import base64
53
+ import hashlib
54
+ import logging
55
+ import os
56
+ import time
57
+ from dataclasses import dataclass
58
+ from pathlib import Path
59
+ from typing import Optional
60
+
61
+ from .utils import WeakFileLock
62
+
63
+
64
+ logger = logging.getLogger(__name__)
65
+
66
+
67
+ @dataclass
68
+ class LocalDownloadFilePaths:
69
+ """
70
+ Paths to the files related to a download process in a local dir.
71
+
72
+ Returned by [`get_local_download_paths`].
73
+
74
+ Attributes:
75
+ file_path (`Path`):
76
+ Path where the file will be saved.
77
+ lock_path (`Path`):
78
+ Path to the lock file used to ensure atomicity when reading/writing metadata.
79
+ metadata_path (`Path`):
80
+ Path to the metadata file.
81
+ """
82
+
83
+ file_path: Path
84
+ lock_path: Path
85
+ metadata_path: Path
86
+
87
+ def incomplete_path(self, etag: str) -> Path:
88
+ """Return the path where a file will be temporarily downloaded before being moved to `file_path`."""
89
+ path = self.metadata_path.parent / f"{_short_hash(self.metadata_path.name)}.{etag}.incomplete"
90
+ resolved_path = str(path.resolve())
91
+ # Some Windows versions do not allow for paths longer than 255 characters.
92
+ # In this case, we must specify it as an extended path by using the "\\?\" prefix.
93
+ if os.name == "nt" and len(resolved_path) > 255 and not resolved_path.startswith("\\\\?\\"):
94
+ path = Path("\\\\?\\" + resolved_path)
95
+ return path
96
+
97
+
98
+ @dataclass(frozen=True)
99
+ class LocalUploadFilePaths:
100
+ """
101
+ Paths to the files related to an upload process in a local dir.
102
+
103
+ Returned by [`get_local_upload_paths`].
104
+
105
+ Attributes:
106
+ path_in_repo (`str`):
107
+ Path of the file in the repo.
108
+ file_path (`Path`):
109
+ Path where the file will be saved.
110
+ lock_path (`Path`):
111
+ Path to the lock file used to ensure atomicity when reading/writing metadata.
112
+ metadata_path (`Path`):
113
+ Path to the metadata file.
114
+ """
115
+
116
+ path_in_repo: str
117
+ file_path: Path
118
+ lock_path: Path
119
+ metadata_path: Path
120
+
121
+
122
+ @dataclass
123
+ class LocalDownloadFileMetadata:
124
+ """
125
+ Metadata about a file in the local directory related to a download process.
126
+
127
+ Attributes:
128
+ filename (`str`):
129
+ Path of the file in the repo.
130
+ commit_hash (`str`):
131
+ Commit hash of the file in the repo.
132
+ etag (`str`):
133
+ ETag of the file in the repo. Used to check if the file has changed.
134
+ For LFS files, this is the sha256 of the file. For regular files, it corresponds to the git hash.
135
+ timestamp (`int`):
136
+ Unix timestamp of when the metadata was saved i.e. when the metadata was accurate.
137
+ """
138
+
139
+ filename: str
140
+ commit_hash: str
141
+ etag: str
142
+ timestamp: float
143
+
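A small sketch of how the three-line download metadata format described in the module docstring maps onto this dataclass; `parse_download_metadata` is a hypothetical helper and the path is a placeholder (the library itself performs this parsing in `read_download_metadata`).

```python
# Hypothetical sketch: parse a `<filename>.metadata` download file (commit hash,
# etag, timestamp on three lines) into the dataclass above. Path is a placeholder.
from pathlib import Path

from huggingface_hub._local_folder import LocalDownloadFileMetadata


def parse_download_metadata(metadata_path: Path, filename: str) -> LocalDownloadFileMetadata:
    commit_hash, etag, timestamp = metadata_path.read_text().splitlines()[:3]
    return LocalDownloadFileMetadata(
        filename=filename,
        commit_hash=commit_hash,
        etag=etag,
        timestamp=float(timestamp),
    )


meta = parse_download_metadata(Path("data/.cache/huggingface/download/file.txt.metadata"), "file.txt")
print(meta.etag, meta.timestamp)
```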
144
+
145
+ @dataclass
146
+ class LocalUploadFileMetadata:
147
+ """
148
+ Metadata about a file in the local directory related to an upload process.
149
+ """
150
+
151
+ size: int
152
+
153
+ # Default values correspond to "we don't know yet"
154
+ timestamp: Optional[float] = None
155
+ should_ignore: Optional[bool] = None
156
+ sha256: Optional[str] = None
157
+ upload_mode: Optional[str] = None
158
+ remote_oid: Optional[str] = None
159
+ is_uploaded: bool = False
160
+ is_committed: bool = False
161
+
162
+ def save(self, paths: LocalUploadFilePaths) -> None:
163
+ """Save the metadata to disk."""
164
+ with WeakFileLock(paths.lock_path):
165
+ with paths.metadata_path.open("w") as f:
166
+ new_timestamp = time.time()
167
+ f.write(str(new_timestamp) + "\n")
168
+
169
+ f.write(str(self.size)) # never None
170
+ f.write("\n")
171
+
172
+ if self.should_ignore is not None:
173
+ f.write(str(int(self.should_ignore)))
174
+ f.write("\n")
175
+
176
+ if self.sha256 is not None:
177
+ f.write(self.sha256)
178
+ f.write("\n")
179
+
180
+ if self.upload_mode is not None:
181
+ f.write(self.upload_mode)
182
+ f.write("\n")
183
+
184
+ if self.remote_oid is not None:
185
+ f.write(self.remote_oid)
186
+ f.write("\n")
187
+
188
+ f.write(str(int(self.is_uploaded)) + "\n")
189
+ f.write(str(int(self.is_committed)) + "\n")
190
+
191
+ self.timestamp = new_timestamp
192
+
193
+
194
+ def get_local_download_paths(local_dir: Path, filename: str) -> LocalDownloadFilePaths:
195
+ """Compute paths to the files related to a download process.
196
+
197
+ Folders containing the paths are all guaranteed to exist.
198
+
199
+ Args:
200
+ local_dir (`Path`):
201
+ Path to the local directory in which files are downloaded.
202
+ filename (`str`):
203
+ Path of the file in the repo.
204
+
205
+ Return:
206
+ [`LocalDownloadFilePaths`]: the paths to the files (file_path, lock_path, metadata_path, incomplete_path).
207
+ """
208
+ # filename is the path in the Hub repository (separated by '/')
209
+ # make sure to have a cross platform transcription
210
+ sanitized_filename = os.path.join(*filename.split("/"))
211
+ if os.name == "nt":
212
+ if sanitized_filename.startswith("..\\") or "\\..\\" in sanitized_filename:
213
+ raise ValueError(
214
+ f"Invalid filename: cannot handle filename '{sanitized_filename}' on Windows. Please ask the repository"
215
+ " owner to rename this file."
216
+ )
217
+ file_path = local_dir / sanitized_filename
218
+ metadata_path = _huggingface_dir(local_dir) / "download" / f"{sanitized_filename}.metadata"
219
+ lock_path = metadata_path.with_suffix(".lock")
220
+
221
+ # Some Windows versions do not allow for paths longer than 255 characters.
222
+ # In this case, we must specify it as an extended path by using the "\\?\" prefix
223
+ if os.name == "nt":
224
+ if not str(local_dir).startswith("\\\\?\\") and len(os.path.abspath(lock_path)) > 255:
225
+ file_path = Path("\\\\?\\" + os.path.abspath(file_path))
226
+ lock_path = Path("\\\\?\\" + os.path.abspath(lock_path))
227
+ metadata_path = Path("\\\\?\\" + os.path.abspath(metadata_path))
228
+
229
+ file_path.parent.mkdir(parents=True, exist_ok=True)
230
+ metadata_path.parent.mkdir(parents=True, exist_ok=True)
231
+ return LocalDownloadFilePaths(file_path=file_path, lock_path=lock_path, metadata_path=metadata_path)
232
+
233
+
234
+ def get_local_upload_paths(local_dir: Path, filename: str) -> LocalUploadFilePaths:
235
+ """Compute paths to the files related to an upload process.
236
+
237
+ Folders containing the paths are all guaranteed to exist.
238
+
239
+ Args:
240
+ local_dir (`Path`):
241
+ Path to the local directory that is uploaded.
242
+ filename (`str`):
243
+ Path of the file in the repo.
244
+
245
+ Return:
246
+ [`LocalUploadFilePaths`]: the paths to the files (file_path, lock_path, metadata_path).
247
+ """
248
+ # filename is the path in the Hub repository (separated by '/')
249
+ # make sure to have a cross platform transcription
250
+ sanitized_filename = os.path.join(*filename.split("/"))
251
+ if os.name == "nt":
252
+ if sanitized_filename.startswith("..\\") or "\\..\\" in sanitized_filename:
253
+ raise ValueError(
254
+ f"Invalid filename: cannot handle filename '{sanitized_filename}' on Windows. Please ask the repository"
255
+ " owner to rename this file."
256
+ )
257
+ file_path = local_dir / sanitized_filename
258
+ metadata_path = _huggingface_dir(local_dir) / "upload" / f"{sanitized_filename}.metadata"
259
+ lock_path = metadata_path.with_suffix(".lock")
260
+
261
+ # Some Windows versions do not allow for paths longer than 255 characters.
262
+ # In this case, we must specify it as an extended path by using the "\\?\" prefix
263
+ if os.name == "nt":
264
+ if not str(local_dir).startswith("\\\\?\\") and len(os.path.abspath(lock_path)) > 255:
265
+ file_path = Path("\\\\?\\" + os.path.abspath(file_path))
266
+ lock_path = Path("\\\\?\\" + os.path.abspath(lock_path))
267
+ metadata_path = Path("\\\\?\\" + os.path.abspath(metadata_path))
268
+
269
+ file_path.parent.mkdir(parents=True, exist_ok=True)
270
+ metadata_path.parent.mkdir(parents=True, exist_ok=True)
271
+ return LocalUploadFilePaths(
272
+ path_in_repo=filename, file_path=file_path, lock_path=lock_path, metadata_path=metadata_path
273
+ )
274
+
275
+
276
+ def read_download_metadata(local_dir: Path, filename: str) -> Optional[LocalDownloadFileMetadata]:
277
+ """Read metadata about a file in the local directory related to a download process.
278
+
279
+ Args:
280
+ local_dir (`Path`):
281
+ Path to the local directory in which files are downloaded.
282
+ filename (`str`):
283
+ Path of the file in the repo.
284
+
285
+ Return:
286
+ [`LocalDownloadFileMetadata`] or `None`: the metadata if it exists, `None` otherwise.
287
+ """
288
+ paths = get_local_download_paths(local_dir, filename)
289
+ with WeakFileLock(paths.lock_path):
290
+ if paths.metadata_path.exists():
291
+ try:
292
+ with paths.metadata_path.open() as f:
293
+ commit_hash = f.readline().strip()
294
+ etag = f.readline().strip()
295
+ timestamp = float(f.readline().strip())
296
+ metadata = LocalDownloadFileMetadata(
297
+ filename=filename,
298
+ commit_hash=commit_hash,
299
+ etag=etag,
300
+ timestamp=timestamp,
301
+ )
302
+ except Exception as e:
303
+ # remove the metadata file if it is corrupted / not the right format
304
+ logger.warning(
305
+ f"Invalid metadata file {paths.metadata_path}: {e}. Removing it from disk and continuing."
306
+ )
307
+ try:
308
+ paths.metadata_path.unlink()
309
+ except Exception as e:
310
+ logger.warning(f"Could not remove corrupted metadata file {paths.metadata_path}: {e}")
311
+
312
+ try:
313
+ # check if the file exists and hasn't been modified since the metadata was saved
314
+ stat = paths.file_path.stat()
315
+ if (
316
+ stat.st_mtime - 1 <= metadata.timestamp
317
+ ): # allow 1s difference as stat.st_mtime might not be precise
318
+ return metadata
319
+ logger.info(f"Ignored metadata for '{filename}' (outdated). Will re-compute hash.")
320
+ except FileNotFoundError:
321
+ # file does not exist => metadata is outdated
322
+ return None
323
+ return None
324
+
325
+
326
+ def read_upload_metadata(local_dir: Path, filename: str) -> LocalUploadFileMetadata:
327
+ """Read metadata about a file in the local directory related to an upload process.
328
+
329
+ TODO: factorize logic with `read_download_metadata`.
330
+
331
+ Args:
332
+ local_dir (`Path`):
333
+ Path to the local directory in which files are downloaded.
334
+ filename (`str`):
335
+ Path of the file in the repo.
336
+
337
+ Return:
338
+ [`LocalUploadFileMetadata`]: the stored metadata if it exists and is still valid, otherwise a fresh metadata object containing only the file size.
339
+ """
340
+ paths = get_local_upload_paths(local_dir, filename)
341
+ with WeakFileLock(paths.lock_path):
342
+ if paths.metadata_path.exists():
343
+ try:
344
+ with paths.metadata_path.open() as f:
345
+ timestamp = float(f.readline().strip())
346
+
347
+ size = int(f.readline().strip()) # never None
348
+
349
+ _should_ignore = f.readline().strip()
350
+ should_ignore = None if _should_ignore == "" else bool(int(_should_ignore))
351
+
352
+ _sha256 = f.readline().strip()
353
+ sha256 = None if _sha256 == "" else _sha256
354
+
355
+ _upload_mode = f.readline().strip()
356
+ upload_mode = None if _upload_mode == "" else _upload_mode
357
+ if upload_mode not in (None, "regular", "lfs"):
358
+ raise ValueError(f"Invalid upload mode in metadata {paths.path_in_repo}: {upload_mode}")
359
+
360
+ _remote_oid = f.readline().strip()
361
+ remote_oid = None if _remote_oid == "" else _remote_oid
362
+
363
+ is_uploaded = bool(int(f.readline().strip()))
364
+ is_committed = bool(int(f.readline().strip()))
365
+
366
+ metadata = LocalUploadFileMetadata(
367
+ timestamp=timestamp,
368
+ size=size,
369
+ should_ignore=should_ignore,
370
+ sha256=sha256,
371
+ upload_mode=upload_mode,
372
+ remote_oid=remote_oid,
373
+ is_uploaded=is_uploaded,
374
+ is_committed=is_committed,
375
+ )
376
+ except Exception as e:
377
+ # remove the metadata file if it is corrupted / not the right format
378
+ logger.warning(
379
+ f"Invalid metadata file {paths.metadata_path}: {e}. Removing it from disk and continuing."
380
+ )
381
+ try:
382
+ paths.metadata_path.unlink()
383
+ except Exception as e:
384
+ logger.warning(f"Could not remove corrupted metadata file {paths.metadata_path}: {e}")
385
+
386
+ # TODO: can we do better?
387
+ if (
388
+ metadata.timestamp is not None
389
+ and metadata.is_uploaded # file was uploaded
390
+ and not metadata.is_committed # but not committed
391
+ and time.time() - metadata.timestamp > 20 * 3600 # and it's been more than 20 hours
392
+ ): # => we consider it as garbage-collected by S3
393
+ metadata.is_uploaded = False
394
+
395
+ # check if the file exists and hasn't been modified since the metadata was saved
396
+ try:
397
+ if metadata.timestamp is not None and paths.file_path.stat().st_mtime <= metadata.timestamp:
398
+ return metadata
399
+ logger.info(f"Ignored metadata for '{filename}' (outdated). Will re-compute hash.")
400
+ except FileNotFoundError:
401
+ # file does not exist => metadata is outdated
402
+ pass
403
+
404
+ # empty metadata => we don't know anything except its size
405
+ return LocalUploadFileMetadata(size=paths.file_path.stat().st_size)
406
+
407
+
408
+ def write_download_metadata(local_dir: Path, filename: str, commit_hash: str, etag: str) -> None:
409
+ """Write metadata about a file in the local directory related to a download process.
410
+
411
+ Args:
412
+ local_dir (`Path`):
413
+ Path to the local directory in which files are downloaded.
414
+ """
415
+ paths = get_local_download_paths(local_dir, filename)
416
+ with WeakFileLock(paths.lock_path):
417
+ with paths.metadata_path.open("w") as f:
418
+ f.write(f"{commit_hash}\n{etag}\n{time.time()}\n")
419
+
420
+
421
+ def _huggingface_dir(local_dir: Path) -> Path:
422
+ """Return the path to the `.cache/huggingface` directory in a local directory."""
423
+ # Wrap in lru_cache to avoid overwriting the .gitignore file if called multiple times
424
+ path = local_dir / ".cache" / "huggingface"
425
+ path.mkdir(exist_ok=True, parents=True)
426
+
427
+ # Create a .gitignore file in the .cache/huggingface directory if it doesn't exist
428
+ # Should be thread-safe enough like this.
429
+ gitignore = path / ".gitignore"
430
+ gitignore_lock = path / ".gitignore.lock"
431
+ if not gitignore.exists():
432
+ try:
433
+ with WeakFileLock(gitignore_lock, timeout=0.1):
434
+ gitignore.write_text("*")
435
+ except IndexError:
436
+ pass
437
+ except OSError: # TimeoutError, FileNotFoundError, PermissionError, etc.
438
+ pass
439
+ try:
440
+ gitignore_lock.unlink()
441
+ except OSError:
442
+ pass
443
+ return path
444
+
445
+
446
+ def _short_hash(filename: str) -> str:
447
+ return base64.urlsafe_b64encode(hashlib.sha1(filename.encode()).digest()).decode()
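As a rough, illustrative sketch (commentary only, not part of the committed file), here is how the download-path and download-metadata helpers defined above could be exercised in a throwaway directory; the commit hash and etag values are made up:

```py
# Illustrative sketch only: exercises the download helpers from _local_folder.py
# in a temporary directory. The commit hash and etag below are dummy values.
import tempfile
from pathlib import Path

from huggingface_hub._local_folder import (
    get_local_download_paths,
    read_download_metadata,
    write_download_metadata,
)

with tempfile.TemporaryDirectory() as tmp:
    local_dir = Path(tmp)
    filename = "models/config.json"  # path of the file inside the repo

    # Compute the on-disk layout: target file, lock file and metadata file.
    paths = get_local_download_paths(local_dir, filename)
    print(paths.file_path)      # <tmp>/models/config.json
    print(paths.metadata_path)  # <tmp>/.cache/huggingface/download/models/config.json.metadata

    # Simulate a finished download, then persist and re-read its metadata.
    paths.file_path.write_text("{}")
    write_download_metadata(local_dir, filename, commit_hash="abc123", etag="deadbeef")
    meta = read_download_metadata(local_dir, filename)
    if meta is not None:
        print(meta.commit_hash, meta.etag)  # abc123 deadbeef
```

The metadata file sits next to a `.lock` file so that concurrent readers and writers serialize access through `WeakFileLock`, as the functions above do.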
venv/lib/python3.13/site-packages/huggingface_hub/_login.py ADDED
@@ -0,0 +1,520 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """Contains methods to log in to the Hub."""
15
+
16
+ import os
17
+ import subprocess
18
+ from getpass import getpass
19
+ from pathlib import Path
20
+ from typing import Optional
21
+
22
+ from . import constants
23
+ from .commands._cli_utils import ANSI
24
+ from .utils import (
25
+ capture_output,
26
+ get_token,
27
+ is_google_colab,
28
+ is_notebook,
29
+ list_credential_helpers,
30
+ logging,
31
+ run_subprocess,
32
+ set_git_credential,
33
+ unset_git_credential,
34
+ )
35
+ from .utils._auth import (
36
+ _get_token_by_name,
37
+ _get_token_from_environment,
38
+ _get_token_from_file,
39
+ _get_token_from_google_colab,
40
+ _save_stored_tokens,
41
+ _save_token,
42
+ get_stored_tokens,
43
+ )
44
+ from .utils._deprecation import _deprecate_arguments, _deprecate_positional_args
45
+
46
+
47
+ logger = logging.get_logger(__name__)
48
+
49
+ _HF_LOGO_ASCII = """
50
+ _| _| _| _| _|_|_| _|_|_| _|_|_| _| _| _|_|_| _|_|_|_| _|_| _|_|_| _|_|_|_|
51
+ _| _| _| _| _| _| _| _|_| _| _| _| _| _| _| _|
52
+ _|_|_|_| _| _| _| _|_| _| _|_| _| _| _| _| _| _|_| _|_|_| _|_|_|_| _| _|_|_|
53
+ _| _| _| _| _| _| _| _| _| _| _|_| _| _| _| _| _| _| _|
54
+ _| _| _|_| _|_|_| _|_|_| _|_|_| _| _| _|_|_| _| _| _| _|_|_| _|_|_|_|
55
+ """
56
+
57
+
58
+ @_deprecate_arguments(
59
+ version="1.0",
60
+ deprecated_args="write_permission",
61
+ custom_message="Fine-grained tokens added complexity to the permissions, making it irrelevant to check if a token has 'write' access.",
62
+ )
63
+ @_deprecate_positional_args(version="1.0")
64
+ def login(
65
+ token: Optional[str] = None,
66
+ *,
67
+ add_to_git_credential: bool = False,
68
+ new_session: bool = True,
69
+ write_permission: bool = False,
70
+ ) -> None:
71
+ """Login the machine to access the Hub.
72
+
73
+ The `token` is persisted in cache and set as a git credential. Once done, the machine
74
+ is logged in and the access token will be available across all `huggingface_hub`
75
+ components. If `token` is not provided, it will be prompted to the user either with
76
+ a widget (in a notebook) or via the terminal.
77
+
78
+ To log in from outside of a script, one can also use `hf auth login` which is
79
+ a cli command that wraps [`login`].
80
+
81
+ <Tip>
82
+
83
+ [`login`] is a drop-in replacement method for [`notebook_login`] as it wraps and
84
+ extends its capabilities.
85
+
86
+ </Tip>
87
+
88
+ <Tip>
89
+
90
+ When the token is not passed, [`login`] will automatically detect if the script runs
91
+ in a notebook or not. However, this detection might not be accurate due to the
92
+ variety of notebooks that exists nowadays. If that is the case, you can always force
93
+ the UI by using [`notebook_login`] or [`interpreter_login`].
94
+
95
+ </Tip>
96
+
97
+ Args:
98
+ token (`str`, *optional*):
99
+ User access token to generate from https://huggingface.co/settings/token.
100
+ add_to_git_credential (`bool`, defaults to `False`):
101
+ If `True`, token will be set as git credential. If no git credential helper
102
+ is configured, a warning will be displayed to the user. If `token` is `None`,
103
+ the value of `add_to_git_credential` is ignored and will be prompted again
104
+ to the end user.
105
+ new_session (`bool`, defaults to `True`):
106
+ If `True`, will request a token even if one is already saved on the machine.
107
+ write_permission (`bool`):
108
+ Ignored and deprecated argument.
109
+ Raises:
110
+ [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
111
+ If an organization token is passed. Only personal account tokens are valid
112
+ to log in.
113
+ [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
114
+ If token is invalid.
115
+ [`ImportError`](https://docs.python.org/3/library/exceptions.html#ImportError)
116
+ If running in a notebook but `ipywidgets` is not installed.
117
+ """
118
+ if token is not None:
119
+ if not add_to_git_credential:
120
+ logger.info(
121
+ "The token has not been saved to the git credentials helper. Pass "
122
+ "`add_to_git_credential=True` in this function directly or "
123
+ "`--add-to-git-credential` if using via `hf`CLI if "
124
+ "you want to set the git credential as well."
125
+ )
126
+ _login(token, add_to_git_credential=add_to_git_credential)
127
+ elif is_notebook():
128
+ notebook_login(new_session=new_session)
129
+ else:
130
+ interpreter_login(new_session=new_session)
131
+
132
+
133
+ def logout(token_name: Optional[str] = None) -> None:
134
+ """Logout the machine from the Hub.
135
+
136
+ Token is deleted from the machine and removed from git credential.
137
+
138
+ Args:
139
+ token_name (`str`, *optional*):
140
+ Name of the access token to logout from. If `None`, will logout from all saved access tokens.
141
+ Raises:
142
+ [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError):
143
+ If the access token name is not found.
144
+ """
145
+ if get_token() is None and not get_stored_tokens(): # No active token and no saved access tokens
146
+ logger.warning("Not logged in!")
147
+ return
148
+ if not token_name:
149
+ # Delete all saved access tokens and token
150
+ for file_path in (constants.HF_TOKEN_PATH, constants.HF_STORED_TOKENS_PATH):
151
+ try:
152
+ Path(file_path).unlink()
153
+ except FileNotFoundError:
154
+ pass
155
+ logger.info("Successfully logged out from all access tokens.")
156
+ else:
157
+ _logout_from_token(token_name)
158
+ logger.info(f"Successfully logged out from access token: {token_name}.")
159
+
160
+ unset_git_credential()
161
+
162
+ # Check if still logged in
163
+ if _get_token_from_google_colab() is not None:
164
+ raise EnvironmentError(
165
+ "You are automatically logged in using a Google Colab secret.\n"
166
+ "To log out, you must unset the `HF_TOKEN` secret in your Colab settings."
167
+ )
168
+ if _get_token_from_environment() is not None:
169
+ raise EnvironmentError(
170
+ "Token has been deleted from your machine but you are still logged in.\n"
171
+ "To log out, you must clear out both `HF_TOKEN` and `HUGGING_FACE_HUB_TOKEN` environment variables."
172
+ )
173
+
174
+
175
+ def auth_switch(token_name: str, add_to_git_credential: bool = False) -> None:
176
+ """Switch to a different access token.
177
+
178
+ Args:
179
+ token_name (`str`):
180
+ Name of the access token to switch to.
181
+ add_to_git_credential (`bool`, defaults to `False`):
182
+ If `True`, token will be set as git credential. If no git credential helper
183
+ is configured, a warning will be displayed to the user. If `token` is `None`,
184
+ the value of `add_to_git_credential` is ignored and will be prompted again
185
+ to the end user.
186
+
187
+ Raises:
188
+ [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError):
189
+ If the access token name is not found.
190
+ """
191
+ token = _get_token_by_name(token_name)
192
+ if not token:
193
+ raise ValueError(f"Access token {token_name} not found in {constants.HF_STORED_TOKENS_PATH}")
194
+ # Write token to HF_TOKEN_PATH
195
+ _set_active_token(token_name, add_to_git_credential)
196
+ logger.info(f"The current active token is: {token_name}")
197
+ token_from_environment = _get_token_from_environment()
198
+ if token_from_environment is not None and token_from_environment != token:
199
+ logger.warning(
200
+ "The environment variable `HF_TOKEN` is set and will override the access token you've just switched to."
201
+ )
202
+
203
+
204
+ def auth_list() -> None:
205
+ """List all stored access tokens."""
206
+ tokens = get_stored_tokens()
207
+
208
+ if not tokens:
209
+ logger.info("No access tokens found.")
210
+ return
211
+ # Find current token
212
+ current_token = get_token()
213
+ current_token_name = None
214
+ for token_name in tokens:
215
+ if tokens.get(token_name) == current_token:
216
+ current_token_name = token_name
217
+ # Print header
218
+ max_offset = max(len("token"), max(len(token) for token in tokens)) + 2
219
+ print(f" {{:<{max_offset}}}| {{:<15}}".format("name", "token"))
220
+ print("-" * (max_offset + 2) + "|" + "-" * 15)
221
+
222
+ # Print saved access tokens
223
+ for token_name in tokens:
224
+ token = tokens.get(token_name, "<not set>")
225
+ masked_token = f"{token[:3]}****{token[-4:]}" if token != "<not set>" else token
226
+ is_current = "*" if token == current_token else " "
227
+
228
+ print(f"{is_current} {{:<{max_offset}}}| {{:<15}}".format(token_name, masked_token))
229
+
230
+ if _get_token_from_environment():
231
+ logger.warning(
232
+ "\nNote: Environment variable `HF_TOKEN` is set and is the current active token independently from the stored tokens listed above."
233
+ )
234
+ elif current_token_name is None:
235
+ logger.warning(
236
+ "\nNote: No active token is set and no environment variable `HF_TOKEN` is found. Use `hf auth login` to log in."
237
+ )
238
+
239
+
240
+ ###
241
+ # Interpreter-based login (text)
242
+ ###
243
+
244
+
245
+ @_deprecate_arguments(
246
+ version="1.0",
247
+ deprecated_args="write_permission",
248
+ custom_message="Fine-grained tokens added complexity to the permissions, making it irrelevant to check if a token has 'write' access.",
249
+ )
250
+ @_deprecate_positional_args(version="1.0")
251
+ def interpreter_login(*, new_session: bool = True, write_permission: bool = False) -> None:
252
+ """
253
+ Displays a prompt to log in to the HF website and store the token.
254
+
255
+ This is equivalent to [`login`] without passing a token when not run in a notebook.
256
+ [`interpreter_login`] is useful if you want to force the use of the terminal prompt
257
+ instead of a notebook widget.
258
+
259
+ For more details, see [`login`].
260
+
261
+ Args:
262
+ new_session (`bool`, defaults to `True`):
263
+ If `True`, will request a token even if one is already saved on the machine.
264
+ write_permission (`bool`):
265
+ Ignored and deprecated argument.
266
+ """
267
+ if not new_session and get_token() is not None:
268
+ logger.info("User is already logged in.")
269
+ return
270
+
271
+ from .commands.delete_cache import _ask_for_confirmation_no_tui
272
+
273
+ print(_HF_LOGO_ASCII)
274
+ if get_token() is not None:
275
+ logger.info(
276
+ " A token is already saved on your machine. Run `hf auth whoami`"
277
+ " to get more information or `hf auth logout` if you want"
278
+ " to log out."
279
+ )
280
+ logger.info(" Setting a new token will erase the existing one.")
281
+
282
+ logger.info(
283
+ " To log in, `huggingface_hub` requires a token generated from https://huggingface.co/settings/tokens ."
284
+ )
285
+ if os.name == "nt":
286
+ logger.info("Token can be pasted using 'Right-Click'.")
287
+ token = getpass("Enter your token (input will not be visible): ")
288
+ add_to_git_credential = _ask_for_confirmation_no_tui("Add token as git credential?")
289
+
290
+ _login(token=token, add_to_git_credential=add_to_git_credential)
291
+
292
+
293
+ ###
294
+ # Notebook-based login (widget)
295
+ ###
296
+
297
+ NOTEBOOK_LOGIN_PASSWORD_HTML = """<center> <img
298
+ src=https://huggingface.co/front/assets/huggingface_logo-noborder.svg
299
+ alt='Hugging Face'> <br> Immediately click login after typing your password or
300
+ it might be stored in plain text in this notebook file. </center>"""
301
+
302
+
303
+ NOTEBOOK_LOGIN_TOKEN_HTML_START = """<center> <img
304
+ src=https://huggingface.co/front/assets/huggingface_logo-noborder.svg
305
+ alt='Hugging Face'> <br> Copy a token from <a
306
+ href="https://huggingface.co/settings/tokens" target="_blank">your Hugging Face
307
+ tokens page</a> and paste it below. <br> Immediately click login after copying
308
+ your token or it might be stored in plain text in this notebook file. </center>"""
309
+
310
+
311
+ NOTEBOOK_LOGIN_TOKEN_HTML_END = """
312
+ <b>Pro Tip:</b> If you don't already have one, you can create a dedicated
313
+ 'notebooks' token with 'write' access, that you can then easily reuse for all
314
+ notebooks. </center>"""
315
+
316
+
317
+ @_deprecate_arguments(
318
+ version="1.0",
319
+ deprecated_args="write_permission",
320
+ custom_message="Fine-grained tokens added complexity to the permissions, making it irrelevant to check if a token has 'write' access.",
321
+ )
322
+ @_deprecate_positional_args(version="1.0")
323
+ def notebook_login(*, new_session: bool = True, write_permission: bool = False) -> None:
324
+ """
325
+ Displays a widget to log in to the HF website and store the token.
326
+
327
+ This is equivalent to [`login`] without passing a token when run in a notebook.
328
+ [`notebook_login`] is useful if you want to force the use of the notebook widget
329
+ instead of a prompt in the terminal.
330
+
331
+ For more details, see [`login`].
332
+
333
+ Args:
334
+ new_session (`bool`, defaults to `True`):
335
+ If `True`, will request a token even if one is already saved on the machine.
336
+ write_permission (`bool`):
337
+ Ignored and deprecated argument.
338
+ """
339
+ try:
340
+ import ipywidgets.widgets as widgets # type: ignore
341
+ from IPython.display import display # type: ignore
342
+ except ImportError:
343
+ raise ImportError(
344
+ "The `notebook_login` function can only be used in a notebook (Jupyter or"
345
+ " Colab) and you need the `ipywidgets` module: `pip install ipywidgets`."
346
+ )
347
+ if not new_session and get_token() is not None:
348
+ logger.info("User is already logged in.")
349
+ return
350
+
351
+ box_layout = widgets.Layout(display="flex", flex_flow="column", align_items="center", width="50%")
352
+
353
+ token_widget = widgets.Password(description="Token:")
354
+ git_checkbox_widget = widgets.Checkbox(value=True, description="Add token as git credential?")
355
+ token_finish_button = widgets.Button(description="Login")
356
+
357
+ login_token_widget = widgets.VBox(
358
+ [
359
+ widgets.HTML(NOTEBOOK_LOGIN_TOKEN_HTML_START),
360
+ token_widget,
361
+ git_checkbox_widget,
362
+ token_finish_button,
363
+ widgets.HTML(NOTEBOOK_LOGIN_TOKEN_HTML_END),
364
+ ],
365
+ layout=box_layout,
366
+ )
367
+ display(login_token_widget)
368
+
369
+ # On click events
370
+ def login_token_event(t):
371
+ """Event handler for the login button."""
372
+ token = token_widget.value
373
+ add_to_git_credential = git_checkbox_widget.value
374
+ # Erase token and clear value to make sure it's not saved in the notebook.
375
+ token_widget.value = ""
376
+ # Hide inputs
377
+ login_token_widget.children = [widgets.Label("Connecting...")]
378
+ try:
379
+ with capture_output() as captured:
380
+ _login(token, add_to_git_credential=add_to_git_credential)
381
+ message = captured.getvalue()
382
+ except Exception as error:
383
+ message = str(error)
384
+ # Print result (success message or error)
385
+ login_token_widget.children = [widgets.Label(line) for line in message.split("\n") if line.strip()]
386
+
387
+ token_finish_button.on_click(login_token_event)
388
+
389
+
390
+ ###
391
+ # Login private helpers
392
+ ###
393
+
394
+
395
+ def _login(
396
+ token: str,
397
+ add_to_git_credential: bool,
398
+ ) -> None:
399
+ from .hf_api import whoami # avoid circular import
400
+
401
+ if token.startswith("api_org"):
402
+ raise ValueError("You must use your personal account token, not an organization token.")
403
+
404
+ token_info = whoami(token)
405
+ permission = token_info["auth"]["accessToken"]["role"]
406
+ logger.info(f"Token is valid (permission: {permission}).")
407
+
408
+ token_name = token_info["auth"]["accessToken"]["displayName"]
409
+ # Store token locally
410
+ _save_token(token=token, token_name=token_name)
411
+ # Set active token
412
+ _set_active_token(token_name=token_name, add_to_git_credential=add_to_git_credential)
413
+ logger.info("Login successful.")
414
+ if _get_token_from_environment():
415
+ logger.warning(
416
+ "Note: Environment variable`HF_TOKEN` is set and is the current active token independently from the token you've just configured."
417
+ )
418
+ else:
419
+ logger.info(f"The current active token is: `{token_name}`")
420
+
421
+
422
+ def _logout_from_token(token_name: str) -> None:
423
+ """Logout from a specific access token.
424
+
425
+ Args:
426
+ token_name (`str`):
427
+ The name of the access token to logout from.
428
+ Raises:
429
+ [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError):
430
+ If the access token name is not found.
431
+ """
432
+ stored_tokens = get_stored_tokens()
433
+ # If there is no access tokens saved or the access token name is not found, do nothing
434
+ if not stored_tokens or token_name not in stored_tokens:
435
+ return
436
+
437
+ token = stored_tokens.pop(token_name)
438
+ _save_stored_tokens(stored_tokens)
439
+
440
+ if token == _get_token_from_file():
441
+ logger.warning(f"Active token '{token_name}' has been deleted.")
442
+ Path(constants.HF_TOKEN_PATH).unlink(missing_ok=True)
443
+
444
+
445
+ def _set_active_token(
446
+ token_name: str,
447
+ add_to_git_credential: bool,
448
+ ) -> None:
449
+ """Set the active access token.
450
+
451
+ Args:
452
+ token_name (`str`):
453
+ The name of the token to set as active.
454
+ """
455
+ token = _get_token_by_name(token_name)
456
+ if not token:
457
+ raise ValueError(f"Token {token_name} not found in {constants.HF_STORED_TOKENS_PATH}")
458
+ if add_to_git_credential:
459
+ if _is_git_credential_helper_configured():
460
+ set_git_credential(token)
461
+ logger.info(
462
+ "Your token has been saved in your configured git credential helpers"
463
+ + f" ({','.join(list_credential_helpers())})."
464
+ )
465
+ else:
466
+ logger.warning("Token has not been saved to git credential helper.")
467
+ # Write token to HF_TOKEN_PATH
468
+ path = Path(constants.HF_TOKEN_PATH)
469
+ path.parent.mkdir(parents=True, exist_ok=True)
470
+ path.write_text(token)
471
+ logger.info(f"Your token has been saved to {constants.HF_TOKEN_PATH}")
472
+
473
+
474
+ def _is_git_credential_helper_configured() -> bool:
475
+ """Check if a git credential helper is configured.
476
+
477
+ Warns user if not the case (except for Google Colab where "store" is set by default
478
+ by `huggingface_hub`).
479
+ """
480
+ helpers = list_credential_helpers()
481
+ if len(helpers) > 0:
482
+ return True # Do not warn: at least 1 helper is set
483
+
484
+ # Only in Google Colab to avoid the warning message
485
+ # See https://github.com/huggingface/huggingface_hub/issues/1043#issuecomment-1247010710
486
+ if is_google_colab():
487
+ _set_store_as_git_credential_helper_globally()
488
+ return True # Do not warn: "store" is used by default in Google Colab
489
+
490
+ # Otherwise, warn user
491
+ print(
492
+ ANSI.red(
493
+ "Cannot authenticate through git-credential as no helper is defined on your"
494
+ " machine.\nYou might have to re-authenticate when pushing to the Hugging"
495
+ " Face Hub.\nRun the following command in your terminal in case you want to"
496
+ " set the 'store' credential helper as default.\n\ngit config --global"
497
+ " credential.helper store\n\nRead"
498
+ " https://git-scm.com/book/en/v2/Git-Tools-Credential-Storage for more"
499
+ " details."
500
+ )
501
+ )
502
+ return False
503
+
504
+
505
+ def _set_store_as_git_credential_helper_globally() -> None:
506
+ """Set globally the credential.helper to `store`.
507
+
508
+ To be used only in Google Colab as we assume the user doesn't care about the git
509
+ credential config. It is the only particular case where we don't want to display the
510
+ warning message in [`notebook_login()`].
511
+
512
+ Related:
513
+ - https://github.com/huggingface/huggingface_hub/issues/1043
514
+ - https://github.com/huggingface/huggingface_hub/issues/1051
515
+ - https://git-scm.com/docs/git-credential-store
516
+ """
517
+ try:
518
+ run_subprocess("git config --global credential.helper store")
519
+ except subprocess.CalledProcessError as exc:
520
+ raise EnvironmentError(exc.stderr)
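For orientation (commentary only, not part of the committed file), a minimal sketch of how the login helpers above could be driven programmatically. It assumes a valid user access token is available in a hypothetical `HF_TOKEN_FOR_DEMO` environment variable and that the machine can reach the Hub, since `_login` validates the token via `whoami`; `"my-read-token"` is a hypothetical token name:

```py
# Illustrative sketch only. login() validates the token against the Hub, stores it
# under its display name and makes it the active token; auth_list() prints the
# stored tokens (the active one is marked with '*'); auth_switch() activates
# another stored token; logout() removes all stored tokens.
import os

from huggingface_hub._login import auth_list, auth_switch, login, logout

login(token=os.environ["HF_TOKEN_FOR_DEMO"], add_to_git_credential=False)
auth_list()
auth_switch(token_name="my-read-token")  # hypothetical token name
logout()
```

Interactively, the same flow is reachable through the `hf auth login` and `hf auth logout` CLI commands mentioned in the docstrings above.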
venv/lib/python3.13/site-packages/huggingface_hub/_oauth.py ADDED
@@ -0,0 +1,460 @@
1
+ import datetime
2
+ import hashlib
3
+ import logging
4
+ import os
5
+ import time
6
+ import urllib.parse
7
+ import warnings
8
+ from dataclasses import dataclass
9
+ from typing import TYPE_CHECKING, Dict, List, Literal, Optional, Tuple, Union
10
+
11
+ from . import constants
12
+ from .hf_api import whoami
13
+ from .utils import experimental, get_token
14
+
15
+
16
+ logger = logging.getLogger(__name__)
17
+
18
+ if TYPE_CHECKING:
19
+ import fastapi
20
+
21
+
22
+ @dataclass
23
+ class OAuthOrgInfo:
24
+ """
25
+ Information about an organization linked to a user logged in with OAuth.
26
+
27
+ Attributes:
28
+ sub (`str`):
29
+ Unique identifier for the org. OpenID Connect field.
30
+ name (`str`):
31
+ The org's full name. OpenID Connect field.
32
+ preferred_username (`str`):
33
+ The org's username. OpenID Connect field.
34
+ picture (`str`):
35
+ The org's profile picture URL. OpenID Connect field.
36
+ is_enterprise (`bool`):
37
+ Whether the org is an enterprise org. Hugging Face field.
38
+ can_pay (`Optional[bool]`, *optional*):
39
+ Whether the org has a payment method set up. Hugging Face field.
40
+ role_in_org (`Optional[str]`, *optional*):
41
+ The user's role in the org. Hugging Face field.
42
+ security_restrictions (`Optional[List[Literal["ip", "token-policy", "mfa", "sso"]]]`, *optional*):
43
+ Array of security restrictions that the user hasn't completed for this org. Possible values: "ip", "token-policy", "mfa", "sso". Hugging Face field.
44
+ """
45
+
46
+ sub: str
47
+ name: str
48
+ preferred_username: str
49
+ picture: str
50
+ is_enterprise: bool
51
+ can_pay: Optional[bool] = None
52
+ role_in_org: Optional[str] = None
53
+ security_restrictions: Optional[List[Literal["ip", "token-policy", "mfa", "sso"]]] = None
54
+
55
+
56
+ @dataclass
57
+ class OAuthUserInfo:
58
+ """
59
+ Information about a user logged in with OAuth.
60
+
61
+ Attributes:
62
+ sub (`str`):
63
+ Unique identifier for the user, even in case of rename. OpenID Connect field.
64
+ name (`str`):
65
+ The user's full name. OpenID Connect field.
66
+ preferred_username (`str`):
67
+ The user's username. OpenID Connect field.
68
+ email_verified (`Optional[bool]`, *optional*):
69
+ Indicates if the user's email is verified. OpenID Connect field.
70
+ email (`Optional[str]`, *optional*):
71
+ The user's email address. OpenID Connect field.
72
+ picture (`str`):
73
+ The user's profile picture URL. OpenID Connect field.
74
+ profile (`str`):
75
+ The user's profile URL. OpenID Connect field.
76
+ website (`Optional[str]`, *optional*):
77
+ The user's website URL. OpenID Connect field.
78
+ is_pro (`bool`):
79
+ Whether the user is a pro user. Hugging Face field.
80
+ can_pay (`Optional[bool]`, *optional*):
81
+ Whether the user has a payment method set up. Hugging Face field.
82
+ orgs (`Optional[List[OAuthOrgInfo]]`, *optional*):
83
+ List of organizations the user is part of. Hugging Face field.
84
+ """
85
+
86
+ sub: str
87
+ name: str
88
+ preferred_username: str
89
+ email_verified: Optional[bool]
90
+ email: Optional[str]
91
+ picture: str
92
+ profile: str
93
+ website: Optional[str]
94
+ is_pro: bool
95
+ can_pay: Optional[bool]
96
+ orgs: Optional[List[OAuthOrgInfo]]
97
+
98
+
99
+ @dataclass
100
+ class OAuthInfo:
101
+ """
102
+ Information about the OAuth login.
103
+
104
+ Attributes:
105
+ access_token (`str`):
106
+ The access token.
107
+ access_token_expires_at (`datetime.datetime`):
108
+ The expiration date of the access token.
109
+ user_info ([`OAuthUserInfo`]):
110
+ The user information.
111
+ state (`str`, *optional*):
112
+ State passed in the original request to the OAuth provider.
113
+ scope (`str`):
114
+ Granted scope.
115
+ """
116
+
117
+ access_token: str
118
+ access_token_expires_at: datetime.datetime
119
+ user_info: OAuthUserInfo
120
+ state: Optional[str]
121
+ scope: str
122
+
123
+
124
+ @experimental
125
+ def attach_huggingface_oauth(app: "fastapi.FastAPI", route_prefix: str = "/"):
126
+ """
127
+ Add OAuth endpoints to a FastAPI app to enable OAuth login with Hugging Face.
128
+
129
+ How to use:
130
+ - Call this method on your FastAPI app to add the OAuth endpoints.
131
+ - Inside your route handlers, call `parse_huggingface_oauth(request)` to retrieve the OAuth info.
132
+ - If user is logged in, an [`OAuthInfo`] object is returned with the user's info. If not, `None` is returned.
133
+ - In your app, make sure to add links to `/oauth/huggingface/login` and `/oauth/huggingface/logout` for the user to log in and out.
134
+
135
+ Example:
136
+ ```py
137
+ from huggingface_hub import attach_huggingface_oauth, parse_huggingface_oauth
138
+
139
+ # Create a FastAPI app
140
+ app = FastAPI()
141
+
142
+ # Add OAuth endpoints to the FastAPI app
143
+ attach_huggingface_oauth(app)
144
+
145
+ # Add a route that greets the user if they are logged in
146
+ @app.get("/")
147
+ def greet_json(request: Request):
148
+ # Retrieve the OAuth info from the request
149
+ oauth_info = parse_huggingface_oauth(request) # e.g. OAuthInfo dataclass
150
+ if oauth_info is None:
151
+ return {"msg": "Not logged in!"}
152
+ return {"msg": f"Hello, {oauth_info.user_info.preferred_username}!"}
153
+ ```
154
+ """
155
+ # TODO: handle generic case (handling OAuth in a non-Space environment with custom dev values) (low priority)
156
+
157
+ # Add SessionMiddleware to the FastAPI app to store the OAuth info in the session.
158
+ # Session Middleware requires a secret key to sign the cookies. Let's use a hash
159
+ # of the OAuth secret key to make it unique to the Space + updated in case OAuth
160
+ # config gets updated. When run locally, we use an empty string as a secret key.
161
+ try:
162
+ from starlette.middleware.sessions import SessionMiddleware
163
+ except ImportError as e:
164
+ raise ImportError(
165
+ "Cannot initialize OAuth due to a missing library. Please run `pip install huggingface_hub[oauth]` or add "
166
+ "`huggingface_hub[oauth]` to your requirements.txt file in order to install the required dependencies."
167
+ ) from e
168
+ session_secret = (constants.OAUTH_CLIENT_SECRET or "") + "-v1"
169
+ app.add_middleware(
170
+ SessionMiddleware, # type: ignore[arg-type]
171
+ secret_key=hashlib.sha256(session_secret.encode()).hexdigest(),
172
+ same_site="none",
173
+ https_only=True,
174
+ ) # type: ignore
175
+
176
+ # Add OAuth endpoints to the FastAPI app:
177
+ # - {route_prefix}/oauth/huggingface/login
178
+ # - {route_prefix}/oauth/huggingface/callback
179
+ # - {route_prefix}/oauth/huggingface/logout
180
+ # If the app is running in a Space, OAuth is enabled normally.
181
+ # Otherwise, we mock the endpoints to make the user log in with a fake user profile - without any calls to hf.co.
182
+ route_prefix = route_prefix.strip("/")
183
+ if os.getenv("SPACE_ID") is not None:
184
+ logger.info("OAuth is enabled in the Space. Adding OAuth routes.")
185
+ _add_oauth_routes(app, route_prefix=route_prefix)
186
+ else:
187
+ logger.info("App is not running in a Space. Adding mocked OAuth routes.")
188
+ _add_mocked_oauth_routes(app, route_prefix=route_prefix)
189
+
190
+
191
+ def parse_huggingface_oauth(request: "fastapi.Request") -> Optional[OAuthInfo]:
192
+ """
193
+ Returns the information from a logged in user as a [`OAuthInfo`] object.
194
+
195
+ For flexibility and future-proofing, this method is very lax in its parsing and does not raise errors.
196
+ Missing fields are set to `None` without a warning.
197
+
198
+ Returns `None` if the user is not logged in (no info in session cookie).
199
+
200
+ See [`attach_huggingface_oauth`] for an example on how to use this method.
201
+ """
202
+ if "oauth_info" not in request.session:
203
+ logger.debug("No OAuth info in session.")
204
+ return None
205
+
206
+ logger.debug("Parsing OAuth info from session.")
207
+ oauth_data = request.session["oauth_info"]
208
+ user_data = oauth_data.get("userinfo", {})
209
+ orgs_data = user_data.get("orgs", [])
210
+
211
+ orgs = (
212
+ [
213
+ OAuthOrgInfo(
214
+ sub=org.get("sub"),
215
+ name=org.get("name"),
216
+ preferred_username=org.get("preferred_username"),
217
+ picture=org.get("picture"),
218
+ is_enterprise=org.get("isEnterprise"),
219
+ can_pay=org.get("canPay"),
220
+ role_in_org=org.get("roleInOrg"),
221
+ security_restrictions=org.get("securityRestrictions"),
222
+ )
223
+ for org in orgs_data
224
+ ]
225
+ if orgs_data
226
+ else None
227
+ )
228
+
229
+ user_info = OAuthUserInfo(
230
+ sub=user_data.get("sub"),
231
+ name=user_data.get("name"),
232
+ preferred_username=user_data.get("preferred_username"),
233
+ email_verified=user_data.get("email_verified"),
234
+ email=user_data.get("email"),
235
+ picture=user_data.get("picture"),
236
+ profile=user_data.get("profile"),
237
+ website=user_data.get("website"),
238
+ is_pro=user_data.get("isPro"),
239
+ can_pay=user_data.get("canPay"),
240
+ orgs=orgs,
241
+ )
242
+
243
+ return OAuthInfo(
244
+ access_token=oauth_data.get("access_token"),
245
+ access_token_expires_at=datetime.datetime.fromtimestamp(oauth_data.get("expires_at")),
246
+ user_info=user_info,
247
+ state=oauth_data.get("state"),
248
+ scope=oauth_data.get("scope"),
249
+ )
250
+
251
+
252
+ def _add_oauth_routes(app: "fastapi.FastAPI", route_prefix: str) -> None:
253
+ """Add OAuth routes to the FastAPI app (login, callback handler and logout)."""
254
+ try:
255
+ import fastapi
256
+ from authlib.integrations.base_client.errors import MismatchingStateError
257
+ from authlib.integrations.starlette_client import OAuth
258
+ from fastapi.responses import RedirectResponse
259
+ except ImportError as e:
260
+ raise ImportError(
261
+ "Cannot initialize OAuth due to a missing library. Please run `pip install huggingface_hub[oauth]` or add "
262
+ "`huggingface_hub[oauth]` to your requirements.txt file."
263
+ ) from e
264
+
265
+ # Check environment variables
266
+ msg = (
267
+ "OAuth is required but '{}' environment variable is not set. Make sure you've enabled OAuth in your Space by"
268
+ " setting `hf_oauth: true` in the Space metadata."
269
+ )
270
+ if constants.OAUTH_CLIENT_ID is None:
271
+ raise ValueError(msg.format("OAUTH_CLIENT_ID"))
272
+ if constants.OAUTH_CLIENT_SECRET is None:
273
+ raise ValueError(msg.format("OAUTH_CLIENT_SECRET"))
274
+ if constants.OAUTH_SCOPES is None:
275
+ raise ValueError(msg.format("OAUTH_SCOPES"))
276
+ if constants.OPENID_PROVIDER_URL is None:
277
+ raise ValueError(msg.format("OPENID_PROVIDER_URL"))
278
+
279
+ # Register OAuth server
280
+ oauth = OAuth()
281
+ oauth.register(
282
+ name="huggingface",
283
+ client_id=constants.OAUTH_CLIENT_ID,
284
+ client_secret=constants.OAUTH_CLIENT_SECRET,
285
+ client_kwargs={"scope": constants.OAUTH_SCOPES},
286
+ server_metadata_url=constants.OPENID_PROVIDER_URL + "/.well-known/openid-configuration",
287
+ )
288
+
289
+ login_uri, callback_uri, logout_uri = _get_oauth_uris(route_prefix)
290
+
291
+ # Register OAuth endpoints
292
+ @app.get(login_uri)
293
+ async def oauth_login(request: fastapi.Request) -> RedirectResponse:
294
+ """Endpoint that redirects to HF OAuth page."""
295
+ redirect_uri = _generate_redirect_uri(request)
296
+ return await oauth.huggingface.authorize_redirect(request, redirect_uri) # type: ignore
297
+
298
+ @app.get(callback_uri)
299
+ async def oauth_redirect_callback(request: fastapi.Request) -> RedirectResponse:
300
+ """Endpoint that handles the OAuth callback."""
301
+ try:
302
+ oauth_info = await oauth.huggingface.authorize_access_token(request) # type: ignore
303
+ except MismatchingStateError:
304
+ # Parse query params
305
+ nb_redirects = int(request.query_params.get("_nb_redirects", 0))
306
+ target_url = request.query_params.get("_target_url")
307
+
308
+ # Build redirect URI with the same query params as before and bump nb_redirects count
309
+ query_params: Dict[str, Union[int, str]] = {"_nb_redirects": nb_redirects + 1}
310
+ if target_url:
311
+ query_params["_target_url"] = target_url
312
+
313
+ redirect_uri = f"{login_uri}?{urllib.parse.urlencode(query_params)}"
314
+
315
+ # If the user is redirected more than 3 times, it is very likely that the cookie is not working properly.
316
+ # (e.g. browser is blocking third-party cookies in iframe). In this case, redirect the user in the
317
+ # non-iframe view.
318
+ if nb_redirects > constants.OAUTH_MAX_REDIRECTS:
319
+ host = os.environ.get("SPACE_HOST")
320
+ if host is None: # cannot happen in a Space
321
+ raise RuntimeError(
322
+ "App is not running in a Space (SPACE_HOST environment variable is not set). Cannot redirect to non-iframe view."
323
+ ) from None
324
+ host_url = "https://" + host.rstrip("/")
325
+ return RedirectResponse(host_url + redirect_uri)
326
+
327
+ # Redirect the user to the login page again
328
+ return RedirectResponse(redirect_uri)
329
+
330
+ # OAuth login worked => store the user info in the session and redirect
331
+ logger.debug("Successfully logged in with OAuth. Storing user info in session.")
332
+ request.session["oauth_info"] = oauth_info
333
+ return RedirectResponse(_get_redirect_target(request))
334
+
335
+ @app.get(logout_uri)
336
+ async def oauth_logout(request: fastapi.Request) -> RedirectResponse:
337
+ """Endpoint that logs out the user (e.g. delete info from cookie session)."""
338
+ logger.debug("Logged out with OAuth. Removing user info from session.")
339
+ request.session.pop("oauth_info", None)
340
+ return RedirectResponse(_get_redirect_target(request))
341
+
342
+
343
+ def _add_mocked_oauth_routes(app: "fastapi.FastAPI", route_prefix: str = "/") -> None:
344
+ """Add fake oauth routes if app is run locally and OAuth is enabled.
345
+
346
+ Using OAuth will have the same behavior as in a Space but instead of authenticating with HF, a mocked user profile
347
+ is added to the session.
348
+ """
349
+ try:
350
+ import fastapi
351
+ from fastapi.responses import RedirectResponse
352
+ from starlette.datastructures import URL
353
+ except ImportError as e:
354
+ raise ImportError(
355
+ "Cannot initialize OAuth due to a missing library. Please run `pip install huggingface_hub[oauth]` or add "
356
+ "`huggingface_hub[oauth]` to your requirements.txt file."
357
+ ) from e
358
+
359
+ warnings.warn(
360
+ "OAuth is not supported outside of a Space environment. To help you debug your app locally, the oauth endpoints"
361
+ "OAuth is not supported outside of a Space environment. To help you debug your app locally, the OAuth endpoints"
362
+ " are mocked to return your profile and token. To make it work, your machine must be logged in to Hugging Face."
363
+ mocked_oauth_info = _get_mocked_oauth_info()
364
+
365
+ login_uri, callback_uri, logout_uri = _get_oauth_uris(route_prefix)
366
+
367
+ # Define OAuth routes
368
+ @app.get(login_uri)
369
+ async def oauth_login(request: fastapi.Request) -> RedirectResponse:
370
+ """Fake endpoint that redirects to HF OAuth page."""
371
+ # Define target (where to redirect after login)
372
+ redirect_uri = _generate_redirect_uri(request)
373
+ return RedirectResponse(callback_uri + "?" + urllib.parse.urlencode({"_target_url": redirect_uri}))
374
+
375
+ @app.get(callback_uri)
376
+ async def oauth_redirect_callback(request: fastapi.Request) -> RedirectResponse:
377
+ """Endpoint that handles the OAuth callback."""
378
+ request.session["oauth_info"] = mocked_oauth_info
379
+ return RedirectResponse(_get_redirect_target(request))
380
+
381
+ @app.get(logout_uri)
382
+ async def oauth_logout(request: fastapi.Request) -> RedirectResponse:
383
+ """Endpoint that logs out the user (e.g. delete cookie session)."""
384
+ request.session.pop("oauth_info", None)
385
+ logout_url = URL("/").include_query_params(**request.query_params)
386
+ return RedirectResponse(url=logout_url, status_code=302) # see https://github.com/gradio-app/gradio/pull/9659
387
+
388
+
389
+ def _generate_redirect_uri(request: "fastapi.Request") -> str:
390
+ if "_target_url" in request.query_params:
391
+ # if `_target_url` already in query params => respect it
392
+ target = request.query_params["_target_url"]
393
+ else:
394
+ # otherwise => keep query params
395
+ target = "/?" + urllib.parse.urlencode(request.query_params)
396
+
397
+ redirect_uri = request.url_for("oauth_redirect_callback").include_query_params(_target_url=target)
398
+ redirect_uri_as_str = str(redirect_uri)
399
+ if redirect_uri.netloc.endswith(".hf.space"):
400
+ # In a Space, FastAPI redirects as http but we want https
401
+ redirect_uri_as_str = redirect_uri_as_str.replace("http://", "https://")
402
+ return redirect_uri_as_str
403
+
404
+
405
+ def _get_redirect_target(request: "fastapi.Request", default_target: str = "/") -> str:
406
+ return request.query_params.get("_target_url", default_target)
407
+
408
+
409
+ def _get_mocked_oauth_info() -> Dict:
410
+ token = get_token()
411
+ if token is None:
412
+ raise ValueError(
413
+ "Your machine must be logged in to HF to debug an OAuth app locally. Please"
414
+ " run `hf auth login` or set `HF_TOKEN` as an environment variable "
415
+ "with one of your access tokens. You can generate a new token in your "
416
+ "settings page (https://huggingface.co/settings/tokens)."
417
+ )
418
+
419
+ user = whoami()
420
+ if user["type"] != "user":
421
+ raise ValueError(
422
+ "Your machine is not logged in with a personal account. Please use a "
423
+ "personal access token. You can generate a new token in your settings page"
424
+ " (https://huggingface.co/settings/tokens)."
425
+ )
426
+
427
+ return {
428
+ "access_token": token,
429
+ "token_type": "bearer",
430
+ "expires_in": 8 * 60 * 60, # 8 hours
431
+ "id_token": "FOOBAR",
432
+ "scope": "openid profile",
433
+ "refresh_token": "hf_oauth__refresh_token",
434
+ "expires_at": int(time.time()) + 8 * 60 * 60, # 8 hours
435
+ "userinfo": {
436
+ "sub": "0123456789",
437
+ "name": user["fullname"],
438
+ "preferred_username": user["name"],
439
+ "profile": f"https://huggingface.co/{user['name']}",
440
+ "picture": user["avatarUrl"],
441
+ "website": "",
442
+ "aud": "00000000-0000-0000-0000-000000000000",
443
+ "auth_time": 1691672844,
444
+ "nonce": "aaaaaaaaaaaaaaaaaaa",
445
+ "iat": 1691672844,
446
+ "exp": 1691676444,
447
+ "iss": "https://huggingface.co",
448
+ },
449
+ }
450
+
451
+
452
+ def _get_oauth_uris(route_prefix: str = "/") -> Tuple[str, str, str]:
453
+ route_prefix = route_prefix.strip("/")
454
+ if route_prefix:
455
+ route_prefix = f"/{route_prefix}"
456
+ return (
457
+ f"{route_prefix}/oauth/huggingface/login",
458
+ f"{route_prefix}/oauth/huggingface/callback",
459
+ f"{route_prefix}/oauth/huggingface/logout",
460
+ )
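To make the route wiring above concrete (again commentary, not part of the committed file), a small sketch of what `_get_oauth_uris` produces for a custom `route_prefix`; `"/admin"` is an arbitrary example prefix:

```py
# Illustrative sketch only: shows the endpoints that attach_huggingface_oauth()
# registers when called with route_prefix="/admin" (an arbitrary example prefix).
from huggingface_hub._oauth import _get_oauth_uris

login_uri, callback_uri, logout_uri = _get_oauth_uris(route_prefix="/admin")
print(login_uri)     # /admin/oauth/huggingface/login
print(callback_uri)  # /admin/oauth/huggingface/callback
print(logout_uri)    # /admin/oauth/huggingface/logout
```

In an actual app these are the paths to link to for login and logout, as described in the `attach_huggingface_oauth` docstring above.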
venv/lib/python3.13/site-packages/huggingface_hub/_snapshot_download.py ADDED
@@ -0,0 +1,343 @@
1
+ import os
2
+ from pathlib import Path
3
+ from typing import Dict, Iterable, List, Literal, Optional, Type, Union
4
+
5
+ import requests
6
+ from tqdm.auto import tqdm as base_tqdm
7
+ from tqdm.contrib.concurrent import thread_map
8
+
9
+ from . import constants
10
+ from .errors import (
11
+ GatedRepoError,
12
+ HfHubHTTPError,
13
+ LocalEntryNotFoundError,
14
+ RepositoryNotFoundError,
15
+ RevisionNotFoundError,
16
+ )
17
+ from .file_download import REGEX_COMMIT_HASH, hf_hub_download, repo_folder_name
18
+ from .hf_api import DatasetInfo, HfApi, ModelInfo, RepoFile, SpaceInfo
19
+ from .utils import OfflineModeIsEnabled, filter_repo_objects, logging, validate_hf_hub_args
20
+ from .utils import tqdm as hf_tqdm
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+ VERY_LARGE_REPO_THRESHOLD = 50000 # After this limit, we don't consider `repo_info.siblings` to be reliable enough
26
+
27
+
28
+ @validate_hf_hub_args
29
+ def snapshot_download(
30
+ repo_id: str,
31
+ *,
32
+ repo_type: Optional[str] = None,
33
+ revision: Optional[str] = None,
34
+ cache_dir: Union[str, Path, None] = None,
35
+ local_dir: Union[str, Path, None] = None,
36
+ library_name: Optional[str] = None,
37
+ library_version: Optional[str] = None,
38
+ user_agent: Optional[Union[Dict, str]] = None,
39
+ proxies: Optional[Dict] = None,
40
+ etag_timeout: float = constants.DEFAULT_ETAG_TIMEOUT,
41
+ force_download: bool = False,
42
+ token: Optional[Union[bool, str]] = None,
43
+ local_files_only: bool = False,
44
+ allow_patterns: Optional[Union[List[str], str]] = None,
45
+ ignore_patterns: Optional[Union[List[str], str]] = None,
46
+ max_workers: int = 8,
47
+ tqdm_class: Optional[Type[base_tqdm]] = None,
48
+ headers: Optional[Dict[str, str]] = None,
49
+ endpoint: Optional[str] = None,
50
+ # Deprecated args
51
+ local_dir_use_symlinks: Union[bool, Literal["auto"]] = "auto",
52
+ resume_download: Optional[bool] = None,
53
+ ) -> str:
54
+ """Download repo files.
55
+
56
+ Download a whole snapshot of a repo's files at the specified revision. This is useful when you want all files from
57
+ a repo, because you don't know which ones you will need a priori. All files are nested inside a folder in order
58
+ to keep their actual filename relative to that folder. You can also filter which files to download using
59
+ `allow_patterns` and `ignore_patterns`.
60
+
61
+ If `local_dir` is provided, the file structure from the repo will be replicated in this location. When using this
62
+ option, the `cache_dir` will not be used and a `.cache/huggingface/` folder will be created at the root of `local_dir`
63
+ to store some metadata related to the downloaded files. While this mechanism is not as robust as the main
64
+ cache-system, it's optimized for regularly pulling the latest version of a repository.
65
+
66
+ An alternative would be to clone the repo but this requires git and git-lfs to be installed and properly
67
+ configured. It is also not possible to filter which files to download when cloning a repository using git.
68
+
69
+ Args:
70
+ repo_id (`str`):
71
+ A user or an organization name and a repo name separated by a `/`.
72
+ repo_type (`str`, *optional*):
73
+ Set to `"dataset"` or `"space"` if downloading from a dataset or space,
74
+ `None` or `"model"` if downloading from a model. Default is `None`.
75
+ revision (`str`, *optional*):
76
+ An optional Git revision id which can be a branch name, a tag, or a
77
+ commit hash.
78
+ cache_dir (`str`, `Path`, *optional*):
79
+ Path to the folder where cached files are stored.
80
+ local_dir (`str` or `Path`, *optional*):
81
+ If provided, the downloaded files will be placed under this directory.
82
+ library_name (`str`, *optional*):
83
+ The name of the library to which the object corresponds.
84
+ library_version (`str`, *optional*):
85
+ The version of the library.
86
+ user_agent (`str`, `dict`, *optional*):
87
+ The user-agent info in the form of a dictionary or a string.
88
+ proxies (`dict`, *optional*):
89
+ Dictionary mapping protocol to the URL of the proxy passed to
90
+ `requests.request`.
91
+ etag_timeout (`float`, *optional*, defaults to `10`):
92
+ When fetching ETag, how many seconds to wait for the server to send
93
+ data before giving up which is passed to `requests.request`.
94
+ force_download (`bool`, *optional*, defaults to `False`):
95
+ Whether the file should be downloaded even if it already exists in the local cache.
96
+ token (`str`, `bool`, *optional*):
97
+ A token to be used for the download.
98
+ - If `True`, the token is read from the HuggingFace config
99
+ folder.
100
+ - If a string, it's used as the authentication token.
101
+ headers (`dict`, *optional*):
102
+ Additional headers to include in the request. Those headers take precedence over the others.
103
+ local_files_only (`bool`, *optional*, defaults to `False`):
104
+ If `True`, avoid downloading the file and return the path to the
105
+ local cached file if it exists.
106
+ allow_patterns (`List[str]` or `str`, *optional*):
107
+ If provided, only files matching at least one pattern are downloaded.
108
+ ignore_patterns (`List[str]` or `str`, *optional*):
109
+ If provided, files matching any of the patterns are not downloaded.
110
+ max_workers (`int`, *optional*):
111
+ Number of concurrent threads to download files (1 thread = 1 file download).
112
+ Defaults to 8.
113
+ tqdm_class (`tqdm`, *optional*):
114
+ If provided, overwrites the default behavior for the progress bar. Passed
115
+ argument must inherit from `tqdm.auto.tqdm` or at least mimic its behavior.
116
+ Note that the `tqdm_class` is not passed to each individual download.
117
+ Defaults to the custom HF progress bar that can be disabled by setting
118
+ `HF_HUB_DISABLE_PROGRESS_BARS` environment variable.
119
+
120
+ Returns:
121
+ `str`: folder path of the repo snapshot.
122
+
123
+ Raises:
124
+ [`~utils.RepositoryNotFoundError`]
125
+ If the repository to download from cannot be found. This may be because it doesn't exist,
126
+ or because it is set to `private` and you do not have access.
127
+ [`~utils.RevisionNotFoundError`]
128
+ If the revision to download from cannot be found.
129
+ [`EnvironmentError`](https://docs.python.org/3/library/exceptions.html#EnvironmentError)
130
+ If `token=True` and the token cannot be found.
131
+ [`OSError`](https://docs.python.org/3/library/exceptions.html#OSError) if
132
+ ETag cannot be determined.
133
+ [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
134
+ if some parameter value is invalid.
135
+ """
136
+ if cache_dir is None:
137
+ cache_dir = constants.HF_HUB_CACHE
138
+ if revision is None:
139
+ revision = constants.DEFAULT_REVISION
140
+ if isinstance(cache_dir, Path):
141
+ cache_dir = str(cache_dir)
142
+
143
+ if repo_type is None:
144
+ repo_type = "model"
145
+ if repo_type not in constants.REPO_TYPES:
146
+ raise ValueError(f"Invalid repo type: {repo_type}. Accepted repo types are: {str(constants.REPO_TYPES)}")
147
+
148
+ storage_folder = os.path.join(cache_dir, repo_folder_name(repo_id=repo_id, repo_type=repo_type))
149
+
150
+ api = HfApi(
151
+ library_name=library_name,
152
+ library_version=library_version,
153
+ user_agent=user_agent,
154
+ endpoint=endpoint,
155
+ headers=headers,
156
+ token=token,
157
+ )
158
+
159
+ repo_info: Union[ModelInfo, DatasetInfo, SpaceInfo, None] = None
160
+ api_call_error: Optional[Exception] = None
161
+ if not local_files_only:
162
+ # try/except logic to handle different errors => taken from `hf_hub_download`
163
+ try:
164
+ # if we have internet connection we want to list files to download
165
+ repo_info = api.repo_info(repo_id=repo_id, repo_type=repo_type, revision=revision)
166
+ except (requests.exceptions.SSLError, requests.exceptions.ProxyError):
167
+ # Actually raise for those subclasses of ConnectionError
168
+ raise
169
+ except (
170
+ requests.exceptions.ConnectionError,
171
+ requests.exceptions.Timeout,
172
+ OfflineModeIsEnabled,
173
+ ) as error:
174
+ # Internet connection is down
175
+ # => will try to use local files only
176
+ api_call_error = error
177
+ pass
178
+ except RevisionNotFoundError:
179
+ # The repo was found but the revision doesn't exist on the Hub (never existed or got deleted)
180
+ raise
181
+ except requests.HTTPError as error:
182
+ # Multiple reasons for an http error:
183
+ # - Repository is private and invalid/missing token sent
184
+ # - Repository is gated and invalid/missing token sent
185
+ # - Hub is down (error 500 or 504)
186
+ # => let's switch to 'local_files_only=True' to check if the files are already cached.
187
+ # (if it's not the case, the error will be re-raised)
188
+ api_call_error = error
189
+ pass
190
+
191
+ # At this stage, if `repo_info` is None it means either:
192
+ # - internet connection is down
193
+ # - internet connection is deactivated (local_files_only=True or HF_HUB_OFFLINE=True)
194
+ # - repo is private/gated and invalid/missing token sent
195
+ # - Hub is down
196
+ # => let's look if we can find the appropriate folder in the cache:
197
+ # - if the specified revision is a commit hash, look inside "snapshots".
198
+ # - if the specified revision is a branch or tag, look inside "refs".
199
+ # => if local_dir is not None, we will return the path to the local folder if it exists.
200
+ if repo_info is None:
201
+ # Try to get which commit hash corresponds to the specified revision
202
+ commit_hash = None
203
+ if REGEX_COMMIT_HASH.match(revision):
204
+ commit_hash = revision
205
+ else:
206
+ ref_path = os.path.join(storage_folder, "refs", revision)
207
+ if os.path.exists(ref_path):
208
+ # retrieve commit_hash from refs file
209
+ with open(ref_path) as f:
210
+ commit_hash = f.read()
211
+
212
+ # Try to locate snapshot folder for this commit hash
213
+ if commit_hash is not None and local_dir is None:
214
+ snapshot_folder = os.path.join(storage_folder, "snapshots", commit_hash)
215
+ if os.path.exists(snapshot_folder):
216
+ # Snapshot folder exists => let's return it
217
+ # (but we can't check if all the files are actually there)
218
+ return snapshot_folder
219
+
220
+ # If local_dir is not None, return it if it exists and is not empty
221
+ if local_dir is not None:
222
+ local_dir = Path(local_dir)
223
+ if local_dir.is_dir() and any(local_dir.iterdir()):
224
+ logger.warning(
225
+ f"Returning existing local_dir `{local_dir}` as remote repo cannot be accessed in `snapshot_download` ({api_call_error})."
226
+ )
227
+ return str(local_dir.resolve())
228
+ # If we couldn't find the appropriate folder on disk, raise an error.
229
+ if local_files_only:
230
+ raise LocalEntryNotFoundError(
231
+ "Cannot find an appropriate cached snapshot folder for the specified revision on the local disk and "
232
+ "outgoing traffic has been disabled. To enable repo look-ups and downloads online, pass "
233
+ "'local_files_only=False' as input."
234
+ )
235
+ elif isinstance(api_call_error, OfflineModeIsEnabled):
236
+ raise LocalEntryNotFoundError(
237
+ "Cannot find an appropriate cached snapshot folder for the specified revision on the local disk and "
238
+ "outgoing traffic has been disabled. To enable repo look-ups and downloads online, set "
239
+ "'HF_HUB_OFFLINE=0' as environment variable."
240
+ ) from api_call_error
241
+ elif isinstance(api_call_error, (RepositoryNotFoundError, GatedRepoError)) or (
242
+ isinstance(api_call_error, HfHubHTTPError) and api_call_error.response.status_code == 401
243
+ ):
244
+ # Repo not found, gated, or specific authentication error => let's raise the actual error
245
+ raise api_call_error
246
+ else:
247
+ # Otherwise: most likely a connection issue or Hub downtime => let's warn the user
248
+ raise LocalEntryNotFoundError(
249
+ "An error happened while trying to locate the files on the Hub and we cannot find the appropriate"
250
+ " snapshot folder for the specified revision on the local disk. Please check your internet connection"
251
+ " and try again."
252
+ ) from api_call_error
253
+
254
+ # At this stage, internet connection is up and running
255
+ # => let's download the files!
256
+ assert repo_info.sha is not None, "Repo info returned from server must have a revision sha."
257
+
258
+ # Corner case: on very large repos, the siblings list in `repo_info` might not contain all files.
259
+ # In that case, we need to use the `list_repo_tree` method to prevent caching issues.
260
+ repo_files: Iterable[str] = [f.rfilename for f in repo_info.siblings] if repo_info.siblings is not None else []
261
+ unreliable_nb_files = (
262
+ repo_info.siblings is None
263
+ or len(repo_info.siblings) == 0
264
+ or len(repo_info.siblings) > VERY_LARGE_REPO_THRESHOLD
265
+ )
266
+ if unreliable_nb_files:
267
+ logger.info(
268
+ "Number of files in the repo is unreliable. Using `list_repo_tree` to ensure all files are listed."
269
+ )
270
+ repo_files = (
271
+ f.rfilename
272
+ for f in api.list_repo_tree(repo_id=repo_id, recursive=True, revision=revision, repo_type=repo_type)
273
+ if isinstance(f, RepoFile)
274
+ )
275
+
276
+ filtered_repo_files: Iterable[str] = filter_repo_objects(
277
+ items=repo_files,
278
+ allow_patterns=allow_patterns,
279
+ ignore_patterns=ignore_patterns,
280
+ )
281
+
282
+ if not unreliable_nb_files:
283
+ filtered_repo_files = list(filtered_repo_files)
284
+ tqdm_desc = f"Fetching {len(filtered_repo_files)} files"
285
+ else:
286
+ tqdm_desc = "Fetching ... files"
287
+
288
+ commit_hash = repo_info.sha
289
+ snapshot_folder = os.path.join(storage_folder, "snapshots", commit_hash)
290
+ # if passed revision is not identical to commit_hash
291
+ # then revision has to be a branch name or tag name.
292
+ # In that case store a ref.
293
+ if revision != commit_hash:
294
+ ref_path = os.path.join(storage_folder, "refs", revision)
295
+ try:
296
+ os.makedirs(os.path.dirname(ref_path), exist_ok=True)
297
+ with open(ref_path, "w") as f:
298
+ f.write(commit_hash)
299
+ except OSError as e:
300
+ logger.warning(f"Ignored error while writing commit hash to {ref_path}: {e}.")
301
+
302
+ # we pass the commit_hash to hf_hub_download
303
+ # so no network call happens if we already
304
+ # have the file locally.
305
+ def _inner_hf_hub_download(repo_file: str):
306
+ return hf_hub_download(
307
+ repo_id,
308
+ filename=repo_file,
309
+ repo_type=repo_type,
310
+ revision=commit_hash,
311
+ endpoint=endpoint,
312
+ cache_dir=cache_dir,
313
+ local_dir=local_dir,
314
+ local_dir_use_symlinks=local_dir_use_symlinks,
315
+ library_name=library_name,
316
+ library_version=library_version,
317
+ user_agent=user_agent,
318
+ proxies=proxies,
319
+ etag_timeout=etag_timeout,
320
+ resume_download=resume_download,
321
+ force_download=force_download,
322
+ token=token,
323
+ headers=headers,
324
+ )
325
+
326
+ if constants.HF_HUB_ENABLE_HF_TRANSFER:
327
+ # when using hf_transfer we don't want extra parallelism
328
+ # from the one hf_transfer provides
329
+ for file in filtered_repo_files:
330
+ _inner_hf_hub_download(file)
331
+ else:
332
+ thread_map(
333
+ _inner_hf_hub_download,
334
+ filtered_repo_files,
335
+ desc=tqdm_desc,
336
+ max_workers=max_workers,
337
+ # User can use its own tqdm class or the default one from `huggingface_hub.utils`
338
+ tqdm_class=tqdm_class or hf_tqdm,
339
+ )
340
+
341
+ if local_dir is not None:
342
+ return str(os.path.realpath(local_dir))
343
+ return snapshot_folder
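For context, here is a minimal usage sketch of the `snapshot_download` function defined above; the repo id, patterns and target directory are illustrative placeholders, not values from this repository:

```py
from huggingface_hub import snapshot_download

# Fetch only the JSON/safetensors files of a (hypothetical) model repo into ./my-model.
# With local_dir set, files land in that folder and only a small .cache/huggingface/
# metadata folder is created there instead of using the main cache.
local_path = snapshot_download(
    repo_id="username/my-model",                      # illustrative repo id
    revision="main",
    allow_patterns=["*.safetensors", "*.json"],
    ignore_patterns=["*.msgpack"],
    local_dir="./my-model",
    max_workers=8,
)
print(local_path)  # path of the downloaded snapshot
```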
venv/lib/python3.13/site-packages/huggingface_hub/_space_api.py ADDED
@@ -0,0 +1,168 @@
1
+ # coding=utf-8
2
+ # Copyright 2019-present, the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ from dataclasses import dataclass
16
+ from datetime import datetime
17
+ from enum import Enum
18
+ from typing import Dict, Optional
19
+
20
+ from huggingface_hub.utils import parse_datetime
21
+
22
+
23
+ class SpaceStage(str, Enum):
24
+ """
25
+ Enumeration of possible stage of a Space on the Hub.
26
+
27
+ Value can be compared to a string:
28
+ ```py
29
+ assert SpaceStage.BUILDING == "BUILDING"
30
+ ```
31
+
32
+ Taken from https://github.com/huggingface/moon-landing/blob/main/server/repo_types/SpaceInfo.ts#L61 (private url).
33
+ """
34
+
35
+ # Copied from moon-landing > server > repo_types > SpaceInfo.ts (private repo)
36
+ NO_APP_FILE = "NO_APP_FILE"
37
+ CONFIG_ERROR = "CONFIG_ERROR"
38
+ BUILDING = "BUILDING"
39
+ BUILD_ERROR = "BUILD_ERROR"
40
+ RUNNING = "RUNNING"
41
+ RUNNING_BUILDING = "RUNNING_BUILDING"
42
+ RUNTIME_ERROR = "RUNTIME_ERROR"
43
+ DELETING = "DELETING"
44
+ STOPPED = "STOPPED"
45
+ PAUSED = "PAUSED"
46
+
47
+
48
+ class SpaceHardware(str, Enum):
49
+ """
50
+ Enumeration of hardware options available to run your Space on the Hub.
51
+
52
+ Value can be compared to a string:
53
+ ```py
54
+ assert SpaceHardware.CPU_BASIC == "cpu-basic"
55
+ ```
56
+
57
+ Taken from https://github.com/huggingface-internal/moon-landing/blob/main/server/repo_types/SpaceHardwareFlavor.ts (private url).
58
+ """
59
+
60
+ # CPU
61
+ CPU_BASIC = "cpu-basic"
62
+ CPU_UPGRADE = "cpu-upgrade"
63
+ CPU_XL = "cpu-xl"
64
+
65
+ # ZeroGPU
66
+ ZERO_A10G = "zero-a10g"
67
+
68
+ # GPU
69
+ T4_SMALL = "t4-small"
70
+ T4_MEDIUM = "t4-medium"
71
+ L4X1 = "l4x1"
72
+ L4X4 = "l4x4"
73
+ L40SX1 = "l40sx1"
74
+ L40SX4 = "l40sx4"
75
+ L40SX8 = "l40sx8"
76
+ A10G_SMALL = "a10g-small"
77
+ A10G_LARGE = "a10g-large"
78
+ A10G_LARGEX2 = "a10g-largex2"
79
+ A10G_LARGEX4 = "a10g-largex4"
80
+ A100_LARGE = "a100-large"
81
+ H100 = "h100"
82
+ H100X8 = "h100x8"
83
+
84
+
85
+ class SpaceStorage(str, Enum):
86
+ """
87
+ Enumeration of persistent storage available for your Space on the Hub.
88
+
89
+ Value can be compared to a string:
90
+ ```py
91
+ assert SpaceStorage.SMALL == "small"
92
+ ```
93
+
94
+ Taken from https://github.com/huggingface/moon-landing/blob/main/server/repo_types/SpaceHardwareFlavor.ts#L24 (private url).
95
+ """
96
+
97
+ SMALL = "small"
98
+ MEDIUM = "medium"
99
+ LARGE = "large"
100
+
101
+
102
+ @dataclass
103
+ class SpaceRuntime:
104
+ """
105
+ Contains information about the current runtime of a Space.
106
+
107
+ Args:
108
+ stage (`str`):
109
+ Current stage of the space. Example: RUNNING.
110
+ hardware (`str` or `None`):
111
+ Current hardware of the space. Example: "cpu-basic". Can be `None` if Space
112
+ is `BUILDING` for the first time.
113
+ requested_hardware (`str` or `None`):
114
+ Requested hardware. Can be different from `hardware`, especially if the request
115
+ has just been made. Example: "t4-medium". Can be `None` if no hardware has
116
+ been requested yet.
117
+ sleep_time (`int` or `None`):
118
+ Number of seconds the Space will be kept alive after the last request. By default (if value is `None`), the
119
+ Space will never go to sleep if it's running on an upgraded hardware, while it will go to sleep after 48
120
+ hours on a free 'cpu-basic' hardware. For more details, see https://huggingface.co/docs/hub/spaces-gpus#sleep-time.
121
+ raw (`dict`):
122
+ Raw response from the server. Contains more information about the Space
123
+ runtime like number of replicas, number of cpu, memory size,...
124
+ """
125
+
126
+ stage: SpaceStage
127
+ hardware: Optional[SpaceHardware]
128
+ requested_hardware: Optional[SpaceHardware]
129
+ sleep_time: Optional[int]
130
+ storage: Optional[SpaceStorage]
131
+ raw: Dict
132
+
133
+ def __init__(self, data: Dict) -> None:
134
+ self.stage = data["stage"]
135
+ self.hardware = data.get("hardware", {}).get("current")
136
+ self.requested_hardware = data.get("hardware", {}).get("requested")
137
+ self.sleep_time = data.get("gcTimeout")
138
+ self.storage = data.get("storage")
139
+ self.raw = data
140
+
141
+
142
+ @dataclass
143
+ class SpaceVariable:
144
+ """
145
+ Contains information about the current variables of a Space.
146
+
147
+ Args:
148
+ key (`str`):
149
+ Variable key. Example: `"MODEL_REPO_ID"`
150
+ value (`str`):
151
+ Variable value. Example: `"the_model_repo_id"`.
152
+ description (`str` or None):
153
+ Description of the variable. Example: `"Model Repo ID of the implemented model"`.
154
+ updated_at (`datetime` or None):
155
+ datetime of the last update of the variable (if the variable has been updated at least once).
156
+ """
157
+
158
+ key: str
159
+ value: str
160
+ description: Optional[str]
161
+ updated_at: Optional[datetime]
162
+
163
+ def __init__(self, key: str, values: Dict) -> None:
164
+ self.key = key
165
+ self.value = values["value"]
166
+ self.description = values.get("description")
167
+ updated_at = values.get("updatedAt")
168
+ self.updated_at = parse_datetime(updated_at) if updated_at is not None else None
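A small sketch of how `SpaceRuntime` consumes a raw server payload; the dictionary below is a made-up response shaped like the fields parsed in `__init__`, not an actual Hub response:

```py
from huggingface_hub import SpaceRuntime

raw = {
    "stage": "RUNNING",
    "hardware": {"current": "cpu-basic", "requested": "t4-small"},
    "gcTimeout": 3600,
    "storage": "small",
}
runtime = SpaceRuntime(raw)
assert runtime.stage == "RUNNING"
assert runtime.hardware == "cpu-basic"            # taken from hardware.current
assert runtime.requested_hardware == "t4-small"   # taken from hardware.requested
assert runtime.sleep_time == 3600                 # taken from gcTimeout
assert runtime.raw is raw                         # full payload kept as-is
```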
venv/lib/python3.13/site-packages/huggingface_hub/_tensorboard_logger.py ADDED
@@ -0,0 +1,193 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """Contains a logger to push training logs to the Hub, using Tensorboard."""
15
+
16
+ from pathlib import Path
17
+ from typing import List, Optional, Union
18
+
19
+ from ._commit_scheduler import CommitScheduler
20
+ from .errors import EntryNotFoundError
21
+ from .repocard import ModelCard
22
+ from .utils import experimental
23
+
24
+
25
+ # Depending on user's setup, SummaryWriter can come either from 'tensorboardX'
26
+ # or from 'torch.utils.tensorboard'. Both are compatible so let's try to load
27
+ # from either of them.
28
+ try:
29
+ from tensorboardX import SummaryWriter as _RuntimeSummaryWriter
30
+
31
+ is_summary_writer_available = True
32
+ except ImportError:
33
+ try:
34
+ from torch.utils.tensorboard import SummaryWriter as _RuntimeSummaryWriter
35
+
36
+ is_summary_writer_available = True
37
+ except ImportError:
38
+ # Dummy class to avoid failing at import. Will raise on instance creation.
39
+ class _DummySummaryWriter:
40
+ pass
41
+
42
+ _RuntimeSummaryWriter = _DummySummaryWriter # type: ignore[assignment]
43
+ is_summary_writer_available = False
44
+
45
+
46
+ class HFSummaryWriter(_RuntimeSummaryWriter):
47
+ """
48
+ Wrapper around the tensorboard's `SummaryWriter` to push training logs to the Hub.
49
+
50
+ Data is logged locally and then pushed to the Hub asynchronously. Pushing data to the Hub is done in a separate
51
+ thread to avoid blocking the training script. In particular, if the upload fails for any reason (e.g. a connection
52
+ issue), the main script will not be interrupted. Data is automatically pushed to the Hub every `commit_every`
53
+ minutes (defaults to every 5 minutes).
54
+
55
+ <Tip warning={true}>
56
+
57
+ `HFSummaryWriter` is experimental. Its API is subject to change in the future without prior notice.
58
+
59
+ </Tip>
60
+
61
+ Args:
62
+ repo_id (`str`):
63
+ The id of the repo to which the logs will be pushed.
64
+ logdir (`str`, *optional*):
65
+ The directory where the logs will be written. If not specified, a local directory will be created by the
66
+ underlying `SummaryWriter` object.
67
+ commit_every (`int` or `float`, *optional*):
68
+ The frequency (in minutes) at which the logs will be pushed to the Hub. Defaults to 5 minutes.
69
+ squash_history (`bool`, *optional*):
70
+ Whether to squash the history of the repo after each commit. Defaults to `False`. Squashing commits is
71
+ useful to avoid degraded performance on the repo when it grows too large.
72
+ repo_type (`str`, *optional*):
73
+ The type of the repo to which the logs will be pushed. Defaults to "model".
74
+ repo_revision (`str`, *optional*):
75
+ The revision of the repo to which the logs will be pushed. Defaults to "main".
76
+ repo_private (`bool`, *optional*):
77
+ Whether to make the repo private. If `None` (default), the repo will be public unless the organization's default is private. This value is ignored if the repo already exists.
78
+ path_in_repo (`str`, *optional*):
79
+ The path to the folder in the repo where the logs will be pushed. Defaults to "tensorboard/".
80
+ repo_allow_patterns (`List[str]` or `str`, *optional*):
81
+ A list of patterns to include in the upload. Defaults to `"*.tfevents.*"`. Check out the
82
+ [upload guide](https://huggingface.co/docs/huggingface_hub/guides/upload#upload-a-folder) for more details.
83
+ repo_ignore_patterns (`List[str]` or `str`, *optional*):
84
+ A list of patterns to exclude in the upload. Check out the
85
+ [upload guide](https://huggingface.co/docs/huggingface_hub/guides/upload#upload-a-folder) for more details.
86
+ token (`str`, *optional*):
87
+ Authentication token. Will default to the stored token. See https://huggingface.co/settings/token for more
88
+ details.
89
+ kwargs:
90
+ Additional keyword arguments passed to `SummaryWriter`.
91
+
92
+ Examples:
93
+ ```diff
94
+ # Taken from https://pytorch.org/docs/stable/tensorboard.html
95
+ - from torch.utils.tensorboard import SummaryWriter
96
+ + from huggingface_hub import HFSummaryWriter
97
+
98
+ import numpy as np
99
+
100
+ - writer = SummaryWriter()
101
+ + writer = HFSummaryWriter(repo_id="username/my-trained-model")
102
+
103
+ for n_iter in range(100):
104
+ writer.add_scalar('Loss/train', np.random.random(), n_iter)
105
+ writer.add_scalar('Loss/test', np.random.random(), n_iter)
106
+ writer.add_scalar('Accuracy/train', np.random.random(), n_iter)
107
+ writer.add_scalar('Accuracy/test', np.random.random(), n_iter)
108
+ ```
109
+
110
+ ```py
111
+ >>> from huggingface_hub import HFSummaryWriter
112
+
113
+ # Logs are automatically pushed every 15 minutes (5 by default) + when exiting the context manager
114
+ >>> with HFSummaryWriter(repo_id="test_hf_logger", commit_every=15) as logger:
115
+ ... logger.add_scalar("a", 1)
116
+ ... logger.add_scalar("b", 2)
117
+ ```
118
+ """
119
+
120
+ @experimental
121
+ def __new__(cls, *args, **kwargs) -> "HFSummaryWriter":
122
+ if not is_summary_writer_available:
123
+ raise ImportError(
124
+ "You must have `tensorboard` installed to use `HFSummaryWriter`. Please run `pip install --upgrade"
125
+ " tensorboardX` first."
126
+ )
127
+ return super().__new__(cls)
128
+
129
+ def __init__(
130
+ self,
131
+ repo_id: str,
132
+ *,
133
+ logdir: Optional[str] = None,
134
+ commit_every: Union[int, float] = 5,
135
+ squash_history: bool = False,
136
+ repo_type: Optional[str] = None,
137
+ repo_revision: Optional[str] = None,
138
+ repo_private: Optional[bool] = None,
139
+ path_in_repo: Optional[str] = "tensorboard",
140
+ repo_allow_patterns: Optional[Union[List[str], str]] = "*.tfevents.*",
141
+ repo_ignore_patterns: Optional[Union[List[str], str]] = None,
142
+ token: Optional[str] = None,
143
+ **kwargs,
144
+ ):
145
+ # Initialize SummaryWriter
146
+ super().__init__(logdir=logdir, **kwargs)
147
+
148
+ # Check logdir has been correctly initialized and fail early otherwise. In practice, SummaryWriter takes care of it.
149
+ if not isinstance(self.logdir, str):
150
+ raise ValueError(f"`self.logdir` must be a string. Got '{self.logdir}' of type {type(self.logdir)}.")
151
+
152
+ # Append logdir name to `path_in_repo`
153
+ if path_in_repo is None or path_in_repo == "":
154
+ path_in_repo = Path(self.logdir).name
155
+ else:
156
+ path_in_repo = path_in_repo.strip("/") + "/" + Path(self.logdir).name
157
+
158
+ # Initialize scheduler
159
+ self.scheduler = CommitScheduler(
160
+ folder_path=self.logdir,
161
+ path_in_repo=path_in_repo,
162
+ repo_id=repo_id,
163
+ repo_type=repo_type,
164
+ revision=repo_revision,
165
+ private=repo_private,
166
+ token=token,
167
+ allow_patterns=repo_allow_patterns,
168
+ ignore_patterns=repo_ignore_patterns,
169
+ every=commit_every,
170
+ squash_history=squash_history,
171
+ )
172
+
173
+ # Exposing some high-level info at root level
174
+ self.repo_id = self.scheduler.repo_id
175
+ self.repo_type = self.scheduler.repo_type
176
+ self.repo_revision = self.scheduler.revision
177
+
178
+ # Add `hf-summary-writer` tag to the model card metadata
179
+ try:
180
+ card = ModelCard.load(repo_id_or_path=self.repo_id, repo_type=self.repo_type)
181
+ except EntryNotFoundError:
182
+ card = ModelCard("")
183
+ tags = card.data.get("tags", [])
184
+ if "hf-summary-writer" not in tags:
185
+ tags.append("hf-summary-writer")
186
+ card.data["tags"] = tags
187
+ card.push_to_hub(repo_id=self.repo_id, repo_type=self.repo_type)
188
+
189
+ def __exit__(self, exc_type, exc_val, exc_tb):
190
+ """Push to hub in a non-blocking way when exiting the logger's context manager."""
191
+ super().__exit__(exc_type, exc_val, exc_tb)
192
+ future = self.scheduler.trigger()
193
+ future.result()
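To make the `path_in_repo` handling above concrete: the name of the local log directory is appended to the requested repo path, so several writers can push to the same repo without clobbering each other. A standalone sketch of that logic (directory names are made up, and `_resolved_path_in_repo` is a hypothetical helper, not part of the class):

```py
from pathlib import Path

def _resolved_path_in_repo(path_in_repo, logdir):
    # Mirrors the branching done in HFSummaryWriter.__init__ (sketch only).
    if path_in_repo is None or path_in_repo == "":
        return Path(logdir).name
    return path_in_repo.strip("/") + "/" + Path(logdir).name

assert _resolved_path_in_repo("tensorboard", "runs/Jun01_12-00-00") == "tensorboard/Jun01_12-00-00"
assert _resolved_path_in_repo(None, "runs/Jun01_12-00-00") == "Jun01_12-00-00"
```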
venv/lib/python3.13/site-packages/huggingface_hub/_upload_large_folder.py ADDED
@@ -0,0 +1,755 @@
1
+ # coding=utf-8
2
+ # Copyright 2024-present, the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ import enum
16
+ import logging
17
+ import os
18
+ import queue
19
+ import shutil
20
+ import sys
21
+ import threading
22
+ import time
23
+ import traceback
24
+ from datetime import datetime
25
+ from pathlib import Path
26
+ from threading import Lock
27
+ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
28
+ from urllib.parse import quote
29
+
30
+ from . import constants
31
+ from ._commit_api import CommitOperationAdd, UploadInfo, _fetch_upload_modes
32
+ from ._local_folder import LocalUploadFileMetadata, LocalUploadFilePaths, get_local_upload_paths, read_upload_metadata
33
+ from .constants import DEFAULT_REVISION, REPO_TYPES
34
+ from .utils import DEFAULT_IGNORE_PATTERNS, filter_repo_objects, tqdm
35
+ from .utils._cache_manager import _format_size
36
+ from .utils._runtime import is_xet_available
37
+ from .utils.sha import sha_fileobj
38
+
39
+
40
+ if TYPE_CHECKING:
41
+ from .hf_api import HfApi
42
+
43
+ logger = logging.getLogger(__name__)
44
+
45
+ WAITING_TIME_IF_NO_TASKS = 10 # seconds
46
+ MAX_NB_FILES_FETCH_UPLOAD_MODE = 100
47
+ COMMIT_SIZE_SCALE: List[int] = [20, 50, 75, 100, 125, 200, 250, 400, 600, 1000]
48
+
49
+ UPLOAD_BATCH_SIZE_XET = 256 # Max 256 files per upload batch for XET-enabled repos
50
+ UPLOAD_BATCH_SIZE_LFS = 1 # Otherwise, batches of 1 for regular LFS upload
51
+
52
+ # Repository limits (from https://huggingface.co/docs/hub/repositories-recommendations)
53
+ MAX_FILES_PER_REPO = 100_000 # Recommended maximum number of files per repository
54
+ MAX_FILES_PER_FOLDER = 10_000 # Recommended maximum number of files per folder
55
+ MAX_FILE_SIZE_GB = 50 # Hard limit for individual file size
56
+ RECOMMENDED_FILE_SIZE_GB = 20 # Recommended maximum for individual file size
57
+
58
+
59
+ def _validate_upload_limits(paths_list: List[LocalUploadFilePaths]) -> None:
60
+ """
61
+ Validate upload against repository limits and warn about potential issues.
62
+
63
+ Args:
64
+ paths_list: List of file paths to be uploaded
65
+
66
+ Warns about:
67
+ - Too many files in the repository (>100k)
68
+ - Too many entries (files or subdirectories) in a single folder (>10k)
69
+ - Files exceeding size limits (>20GB recommended, >50GB hard limit)
70
+ """
71
+ logger.info("Running validation checks on files to upload...")
72
+
73
+ # Check 1: Total file count
74
+ if len(paths_list) > MAX_FILES_PER_REPO:
75
+ logger.warning(
76
+ f"You are about to upload {len(paths_list):,} files. "
77
+ f"This exceeds the recommended limit of {MAX_FILES_PER_REPO:,} files per repository.\n"
78
+ f"Consider:\n"
79
+ f" - Splitting your data into multiple repositories\n"
80
+ f" - Using fewer, larger files (e.g., parquet files)\n"
81
+ f" - See: https://huggingface.co/docs/hub/repositories-recommendations"
82
+ )
83
+
84
+ # Check 2: Files and subdirectories per folder
85
+ # Track immediate children (files and subdirs) for each folder
86
+ from collections import defaultdict
87
+
88
+ entries_per_folder: Dict[str, Any] = defaultdict(lambda: {"files": 0, "subdirs": set()})
89
+
90
+ for paths in paths_list:
91
+ path = Path(paths.path_in_repo)
92
+ parts = path.parts
93
+
94
+ # Count this file in its immediate parent directory
95
+ parent = str(path.parent) if str(path.parent) != "." else "."
96
+ entries_per_folder[parent]["files"] += 1
97
+
98
+ # Track immediate subdirectories for each parent folder
99
+ # Walk through the path components to track parent-child relationships
100
+ for i, child in enumerate(parts[:-1]):
101
+ parent = "." if i == 0 else "/".join(parts[:i])
102
+ entries_per_folder[parent]["subdirs"].add(child)
103
+
104
+ # Check limits for each folder
105
+ for folder, data in entries_per_folder.items():
106
+ file_count = data["files"]
107
+ subdir_count = len(data["subdirs"])
108
+ total_entries = file_count + subdir_count
109
+
110
+ if total_entries > MAX_FILES_PER_FOLDER:
111
+ folder_display = "root" if folder == "." else folder
112
+ logger.warning(
113
+ f"Folder '{folder_display}' contains {total_entries:,} entries "
114
+ f"({file_count:,} files and {subdir_count:,} subdirectories). "
115
+ f"This exceeds the recommended {MAX_FILES_PER_FOLDER:,} entries per folder.\n"
116
+ "Consider reorganising into sub-folders."
117
+ )
118
+
119
+ # Check 3: File sizes
120
+ large_files = []
121
+ very_large_files = []
122
+
123
+ for paths in paths_list:
124
+ size = paths.file_path.stat().st_size
125
+ size_gb = size / 1_000_000_000 # Use decimal GB as per Hub limits
126
+
127
+ if size_gb > MAX_FILE_SIZE_GB:
128
+ very_large_files.append((paths.path_in_repo, size_gb))
129
+ elif size_gb > RECOMMENDED_FILE_SIZE_GB:
130
+ large_files.append((paths.path_in_repo, size_gb))
131
+
132
+ # Warn about very large files (>50GB)
133
+ if very_large_files:
134
+ files_str = "\n - ".join(f"{path}: {size:.1f}GB" for path, size in very_large_files[:5])
135
+ more_str = f"\n ... and {len(very_large_files) - 5} more files" if len(very_large_files) > 5 else ""
136
+ logger.warning(
137
+ f"Found {len(very_large_files)} files exceeding the {MAX_FILE_SIZE_GB}GB hard limit:\n"
138
+ f" - {files_str}{more_str}\n"
139
+ f"These files may fail to upload. Consider splitting them into smaller chunks."
140
+ )
141
+
142
+ # Warn about large files (>20GB)
143
+ if large_files:
144
+ files_str = "\n - ".join(f"{path}: {size:.1f}GB" for path, size in large_files[:5])
145
+ more_str = f"\n ... and {len(large_files) - 5} more files" if len(large_files) > 5 else ""
146
+ logger.warning(
147
+ f"Found {len(large_files)} files larger than {RECOMMENDED_FILE_SIZE_GB}GB (recommended limit):\n"
148
+ f" - {files_str}{more_str}\n"
149
+ f"Large files may slow down loading and processing."
150
+ )
151
+
152
+ logger.info("Validation checks complete.")
153
+
154
+
155
+ def upload_large_folder_internal(
156
+ api: "HfApi",
157
+ repo_id: str,
158
+ folder_path: Union[str, Path],
159
+ *,
160
+ repo_type: str, # Repo type is required!
161
+ revision: Optional[str] = None,
162
+ private: Optional[bool] = None,
163
+ allow_patterns: Optional[Union[List[str], str]] = None,
164
+ ignore_patterns: Optional[Union[List[str], str]] = None,
165
+ num_workers: Optional[int] = None,
166
+ print_report: bool = True,
167
+ print_report_every: int = 60,
168
+ ):
169
+ """Upload a large folder to the Hub in the most resilient way possible.
170
+
171
+ See [`HfApi.upload_large_folder`] for the full documentation.
172
+ """
173
+ # 1. Check args and setup
174
+ if repo_type is None:
175
+ raise ValueError(
176
+ "For large uploads, `repo_type` is explicitly required. Please set it to `model`, `dataset` or `space`."
177
+ " If you are using the CLI, pass it as `--repo-type=model`."
178
+ )
179
+ if repo_type not in REPO_TYPES:
180
+ raise ValueError(f"Invalid repo type, must be one of {REPO_TYPES}")
181
+ if revision is None:
182
+ revision = DEFAULT_REVISION
183
+
184
+ folder_path = Path(folder_path).expanduser().resolve()
185
+ if not folder_path.is_dir():
186
+ raise ValueError(f"Provided path: '{folder_path}' is not a directory")
187
+
188
+ if ignore_patterns is None:
189
+ ignore_patterns = []
190
+ elif isinstance(ignore_patterns, str):
191
+ ignore_patterns = [ignore_patterns]
192
+ ignore_patterns += DEFAULT_IGNORE_PATTERNS
193
+
194
+ if num_workers is None:
195
+ nb_cores = os.cpu_count() or 1
196
+ num_workers = max(nb_cores - 2, 2) # Use all but 2 cores, or at least 2 cores
197
+
198
+ # 2. Create repo if missing
199
+ repo_url = api.create_repo(repo_id=repo_id, repo_type=repo_type, private=private, exist_ok=True)
200
+ logger.info(f"Repo created: {repo_url}")
201
+ repo_id = repo_url.repo_id
202
+ # 2.1 Check if xet is enabled to set batch file upload size
203
+ is_xet_enabled = (
204
+ is_xet_available()
205
+ and api.repo_info(
206
+ repo_id=repo_id,
207
+ repo_type=repo_type,
208
+ revision=revision,
209
+ expand="xetEnabled",
210
+ ).xet_enabled
211
+ )
212
+ upload_batch_size = UPLOAD_BATCH_SIZE_XET if is_xet_enabled else UPLOAD_BATCH_SIZE_LFS
213
+
214
+ # 3. List files to upload
215
+ filtered_paths_list = filter_repo_objects(
216
+ (path.relative_to(folder_path).as_posix() for path in folder_path.glob("**/*") if path.is_file()),
217
+ allow_patterns=allow_patterns,
218
+ ignore_patterns=ignore_patterns,
219
+ )
220
+ paths_list = [get_local_upload_paths(folder_path, relpath) for relpath in filtered_paths_list]
221
+ logger.info(f"Found {len(paths_list)} candidate files to upload")
222
+
223
+ # Validate upload against repository limits
224
+ _validate_upload_limits(paths_list)
225
+
226
+ logger.info("Starting upload...")
227
+
228
+ # Read metadata for each file
229
+ items = [
230
+ (paths, read_upload_metadata(folder_path, paths.path_in_repo))
231
+ for paths in tqdm(paths_list, desc="Recovering from metadata files")
232
+ ]
233
+
234
+ # 4. Start workers
235
+ status = LargeUploadStatus(items, upload_batch_size)
236
+ threads = [
237
+ threading.Thread(
238
+ target=_worker_job,
239
+ kwargs={
240
+ "status": status,
241
+ "api": api,
242
+ "repo_id": repo_id,
243
+ "repo_type": repo_type,
244
+ "revision": revision,
245
+ },
246
+ )
247
+ for _ in range(num_workers)
248
+ ]
249
+
250
+ for thread in threads:
251
+ thread.start()
252
+
253
+ # 5. Print regular reports
254
+ if print_report:
255
+ print("\n\n" + status.current_report())
256
+ last_report_ts = time.time()
257
+ while True:
258
+ time.sleep(1)
259
+ if time.time() - last_report_ts >= print_report_every:
260
+ if print_report:
261
+ _print_overwrite(status.current_report())
262
+ last_report_ts = time.time()
263
+ if status.is_done():
264
+ logging.info("Is done: exiting main loop")
265
+ break
266
+
267
+ for thread in threads:
268
+ thread.join()
269
+
270
+ logger.info(status.current_report())
271
+ logging.info("Upload is complete!")
272
+
273
+
274
+ ####################
275
+ # Logic to manage workers and synchronize tasks
276
+ ####################
277
+
278
+
279
+ class WorkerJob(enum.Enum):
280
+ SHA256 = enum.auto()
281
+ GET_UPLOAD_MODE = enum.auto()
282
+ PREUPLOAD_LFS = enum.auto()
283
+ COMMIT = enum.auto()
284
+ WAIT = enum.auto() # if no tasks are available but we don't want to exit
285
+
286
+
287
+ JOB_ITEM_T = Tuple[LocalUploadFilePaths, LocalUploadFileMetadata]
288
+
289
+
290
+ class LargeUploadStatus:
291
+ """Contains information, queues and tasks for a large upload process."""
292
+
293
+ def __init__(self, items: List[JOB_ITEM_T], upload_batch_size: int = 1):
294
+ self.items = items
295
+ self.queue_sha256: "queue.Queue[JOB_ITEM_T]" = queue.Queue()
296
+ self.queue_get_upload_mode: "queue.Queue[JOB_ITEM_T]" = queue.Queue()
297
+ self.queue_preupload_lfs: "queue.Queue[JOB_ITEM_T]" = queue.Queue()
298
+ self.queue_commit: "queue.Queue[JOB_ITEM_T]" = queue.Queue()
299
+ self.lock = Lock()
300
+
301
+ self.nb_workers_sha256: int = 0
302
+ self.nb_workers_get_upload_mode: int = 0
303
+ self.nb_workers_preupload_lfs: int = 0
304
+ self.upload_batch_size: int = upload_batch_size
305
+ self.nb_workers_commit: int = 0
306
+ self.nb_workers_waiting: int = 0
307
+ self.last_commit_attempt: Optional[float] = None
308
+
309
+ self._started_at = datetime.now()
310
+ self._chunk_idx: int = 1
311
+ self._chunk_lock: Lock = Lock()
312
+
313
+ # Setup queues
314
+ for item in self.items:
315
+ paths, metadata = item
316
+ if metadata.sha256 is None:
317
+ self.queue_sha256.put(item)
318
+ elif metadata.upload_mode is None:
319
+ self.queue_get_upload_mode.put(item)
320
+ elif metadata.upload_mode == "lfs" and not metadata.is_uploaded:
321
+ self.queue_preupload_lfs.put(item)
322
+ elif not metadata.is_committed:
323
+ self.queue_commit.put(item)
324
+ else:
325
+ logger.debug(f"Skipping file {paths.path_in_repo} (already uploaded and committed)")
326
+
327
+ def target_chunk(self) -> int:
328
+ with self._chunk_lock:
329
+ return COMMIT_SIZE_SCALE[self._chunk_idx]
330
+
331
+ def update_chunk(self, success: bool, nb_items: int, duration: float) -> None:
332
+ with self._chunk_lock:
333
+ if not success:
334
+ logger.warning(f"Failed to commit {nb_items} files at once. Will retry with less files in next batch.")
335
+ self._chunk_idx -= 1
336
+ elif nb_items >= COMMIT_SIZE_SCALE[self._chunk_idx] and duration < 40:
337
+ logger.info(f"Successfully committed {nb_items} at once. Increasing the limit for next batch.")
338
+ self._chunk_idx += 1
339
+
340
+ self._chunk_idx = max(0, min(self._chunk_idx, len(COMMIT_SIZE_SCALE) - 1))
341
+
342
+ def current_report(self) -> str:
343
+ """Generate a report of the current status of the large upload."""
344
+ nb_hashed = 0
345
+ size_hashed = 0
346
+ nb_preuploaded = 0
347
+ nb_lfs = 0
348
+ nb_lfs_unsure = 0
349
+ size_preuploaded = 0
350
+ nb_committed = 0
351
+ size_committed = 0
352
+ total_size = 0
353
+ ignored_files = 0
354
+ total_files = 0
355
+
356
+ with self.lock:
357
+ for _, metadata in self.items:
358
+ if metadata.should_ignore:
359
+ ignored_files += 1
360
+ continue
361
+ total_size += metadata.size
362
+ total_files += 1
363
+ if metadata.sha256 is not None:
364
+ nb_hashed += 1
365
+ size_hashed += metadata.size
366
+ if metadata.upload_mode == "lfs":
367
+ nb_lfs += 1
368
+ if metadata.upload_mode is None:
369
+ nb_lfs_unsure += 1
370
+ if metadata.is_uploaded:
371
+ nb_preuploaded += 1
372
+ size_preuploaded += metadata.size
373
+ if metadata.is_committed:
374
+ nb_committed += 1
375
+ size_committed += metadata.size
376
+ total_size_str = _format_size(total_size)
377
+
378
+ now = datetime.now()
379
+ now_str = now.strftime("%Y-%m-%d %H:%M:%S")
380
+ elapsed = now - self._started_at
381
+ elapsed_str = str(elapsed).split(".")[0] # remove milliseconds
382
+
383
+ message = "\n" + "-" * 10
384
+ message += f" {now_str} ({elapsed_str}) "
385
+ message += "-" * 10 + "\n"
386
+
387
+ message += "Files: "
388
+ message += f"hashed {nb_hashed}/{total_files} ({_format_size(size_hashed)}/{total_size_str}) | "
389
+ message += f"pre-uploaded: {nb_preuploaded}/{nb_lfs} ({_format_size(size_preuploaded)}/{total_size_str})"
390
+ if nb_lfs_unsure > 0:
391
+ message += f" (+{nb_lfs_unsure} unsure)"
392
+ message += f" | committed: {nb_committed}/{total_files} ({_format_size(size_committed)}/{total_size_str})"
393
+ message += f" | ignored: {ignored_files}\n"
394
+
395
+ message += "Workers: "
396
+ message += f"hashing: {self.nb_workers_sha256} | "
397
+ message += f"get upload mode: {self.nb_workers_get_upload_mode} | "
398
+ message += f"pre-uploading: {self.nb_workers_preupload_lfs} | "
399
+ message += f"committing: {self.nb_workers_commit} | "
400
+ message += f"waiting: {self.nb_workers_waiting}\n"
401
+ message += "-" * 51
402
+
403
+ return message
404
+
405
+ def is_done(self) -> bool:
406
+ with self.lock:
407
+ return all(metadata.is_committed or metadata.should_ignore for _, metadata in self.items)
408
+
409
+
410
+ def _worker_job(
411
+ status: LargeUploadStatus,
412
+ api: "HfApi",
413
+ repo_id: str,
414
+ repo_type: str,
415
+ revision: str,
416
+ ):
417
+ """
418
+ Main process for a worker. The worker will perform tasks based on the priority list until all files are uploaded
419
+ and committed. If no tasks are available, the worker will wait for 10 seconds before checking again.
420
+
421
+ If a task fails for any reason, the item(s) are put back in the queue for another worker to pick up.
422
+
423
+ Read `upload_large_folder` docstring for more information on how tasks are prioritized.
424
+ """
425
+ while True:
426
+ next_job: Optional[Tuple[WorkerJob, List[JOB_ITEM_T]]] = None
427
+
428
+ # Determine next task
429
+ next_job = _determine_next_job(status)
430
+ if next_job is None:
431
+ return
432
+ job, items = next_job
433
+
434
+ # Perform task
435
+ if job == WorkerJob.SHA256:
436
+ item = items[0] # single item
437
+ try:
438
+ _compute_sha256(item)
439
+ status.queue_get_upload_mode.put(item)
440
+ except KeyboardInterrupt:
441
+ raise
442
+ except Exception as e:
443
+ logger.error(f"Failed to compute sha256: {e}")
444
+ traceback.format_exc()
445
+ status.queue_sha256.put(item)
446
+
447
+ with status.lock:
448
+ status.nb_workers_sha256 -= 1
449
+
450
+ elif job == WorkerJob.GET_UPLOAD_MODE:
451
+ try:
452
+ _get_upload_mode(items, api=api, repo_id=repo_id, repo_type=repo_type, revision=revision)
453
+ except KeyboardInterrupt:
454
+ raise
455
+ except Exception as e:
456
+ logger.error(f"Failed to get upload mode: {e}")
457
+ traceback.format_exc()
458
+
459
+ # Items are either:
460
+ # - dropped (if should_ignore)
461
+ # - put in LFS queue (if LFS)
462
+ # - put in commit queue (if regular)
463
+ # - or put back (if error occurred).
464
+ for item in items:
465
+ _, metadata = item
466
+ if metadata.should_ignore:
467
+ continue
468
+ if metadata.upload_mode == "lfs":
469
+ status.queue_preupload_lfs.put(item)
470
+ elif metadata.upload_mode == "regular":
471
+ status.queue_commit.put(item)
472
+ else:
473
+ status.queue_get_upload_mode.put(item)
474
+
475
+ with status.lock:
476
+ status.nb_workers_get_upload_mode -= 1
477
+
478
+ elif job == WorkerJob.PREUPLOAD_LFS:
479
+ try:
480
+ _preupload_lfs(items, api=api, repo_id=repo_id, repo_type=repo_type, revision=revision)
481
+ for item in items:
482
+ status.queue_commit.put(item)
483
+ except KeyboardInterrupt:
484
+ raise
485
+ except Exception as e:
486
+ logger.error(f"Failed to preupload LFS: {e}")
487
+ traceback.format_exc()
488
+ for item in items:
489
+ status.queue_preupload_lfs.put(item)
490
+
491
+ with status.lock:
492
+ status.nb_workers_preupload_lfs -= 1
493
+
494
+ elif job == WorkerJob.COMMIT:
495
+ start_ts = time.time()
496
+ success = True
497
+ try:
498
+ _commit(items, api=api, repo_id=repo_id, repo_type=repo_type, revision=revision)
499
+ except KeyboardInterrupt:
500
+ raise
501
+ except Exception as e:
502
+ logger.error(f"Failed to commit: {e}")
503
+ traceback.format_exc()
504
+ for item in items:
505
+ status.queue_commit.put(item)
506
+ success = False
507
+ duration = time.time() - start_ts
508
+ status.update_chunk(success, len(items), duration)
509
+ with status.lock:
510
+ status.last_commit_attempt = time.time()
511
+ status.nb_workers_commit -= 1
512
+
513
+ elif job == WorkerJob.WAIT:
514
+ time.sleep(WAITING_TIME_IF_NO_TASKS)
515
+ with status.lock:
516
+ status.nb_workers_waiting -= 1
517
+
518
+
519
+ def _determine_next_job(status: LargeUploadStatus) -> Optional[Tuple[WorkerJob, List[JOB_ITEM_T]]]:
520
+ with status.lock:
521
+ # 1. Commit if more than 5 minutes since last commit attempt (and at least 1 file)
522
+ if (
523
+ status.nb_workers_commit == 0
524
+ and status.queue_commit.qsize() > 0
525
+ and status.last_commit_attempt is not None
526
+ and time.time() - status.last_commit_attempt > 5 * 60
527
+ ):
528
+ status.nb_workers_commit += 1
529
+ logger.debug("Job: commit (more than 5 minutes since last commit attempt)")
530
+ return (WorkerJob.COMMIT, _get_n(status.queue_commit, status.target_chunk()))
531
+
532
+ # 2. Commit if at least 150 files are ready to commit
533
+ elif status.nb_workers_commit == 0 and status.queue_commit.qsize() >= 150:
534
+ status.nb_workers_commit += 1
535
+ logger.debug("Job: commit (>100 files ready)")
536
+ return (WorkerJob.COMMIT, _get_n(status.queue_commit, status.target_chunk()))
537
+
538
+ # 3. Get upload mode if at least 100 files
539
+ elif status.queue_get_upload_mode.qsize() >= MAX_NB_FILES_FETCH_UPLOAD_MODE:
540
+ status.nb_workers_get_upload_mode += 1
541
+ logger.debug(f"Job: get upload mode (>{MAX_NB_FILES_FETCH_UPLOAD_MODE} files ready)")
542
+ return (WorkerJob.GET_UPLOAD_MODE, _get_n(status.queue_get_upload_mode, MAX_NB_FILES_FETCH_UPLOAD_MODE))
543
+
544
+ # 4. Preupload LFS file if at least `status.upload_batch_size` files and no worker is preuploading LFS
545
+ elif status.queue_preupload_lfs.qsize() >= status.upload_batch_size and status.nb_workers_preupload_lfs == 0:
546
+ status.nb_workers_preupload_lfs += 1
547
+ logger.debug("Job: preupload LFS (no other worker preuploading LFS)")
548
+ return (WorkerJob.PREUPLOAD_LFS, _get_n(status.queue_preupload_lfs, status.upload_batch_size))
549
+
550
+ # 5. Compute sha256 if at least 1 file and no worker is computing sha256
551
+ elif status.queue_sha256.qsize() > 0 and status.nb_workers_sha256 == 0:
552
+ status.nb_workers_sha256 += 1
553
+ logger.debug("Job: sha256 (no other worker computing sha256)")
554
+ return (WorkerJob.SHA256, _get_one(status.queue_sha256))
555
+
556
+ # 6. Get upload mode if at least 1 file and no worker is getting upload mode
557
+ elif status.queue_get_upload_mode.qsize() > 0 and status.nb_workers_get_upload_mode == 0:
558
+ status.nb_workers_get_upload_mode += 1
559
+ logger.debug("Job: get upload mode (no other worker getting upload mode)")
560
+ return (WorkerJob.GET_UPLOAD_MODE, _get_n(status.queue_get_upload_mode, MAX_NB_FILES_FETCH_UPLOAD_MODE))
561
+
562
+ # 7. Preupload LFS file if at least `status.upload_batch_size` files
563
+ # Skip if hf_transfer is enabled and there is already a worker preuploading LFS
564
+ elif status.queue_preupload_lfs.qsize() >= status.upload_batch_size and (
565
+ status.nb_workers_preupload_lfs == 0 or not constants.HF_HUB_ENABLE_HF_TRANSFER
566
+ ):
567
+ status.nb_workers_preupload_lfs += 1
568
+ logger.debug("Job: preupload LFS")
569
+ return (WorkerJob.PREUPLOAD_LFS, _get_n(status.queue_preupload_lfs, status.upload_batch_size))
570
+
571
+ # 8. Compute sha256 if at least 1 file
572
+ elif status.queue_sha256.qsize() > 0:
573
+ status.nb_workers_sha256 += 1
574
+ logger.debug("Job: sha256")
575
+ return (WorkerJob.SHA256, _get_one(status.queue_sha256))
576
+
577
+ # 9. Get upload mode if at least 1 file
578
+ elif status.queue_get_upload_mode.qsize() > 0:
579
+ status.nb_workers_get_upload_mode += 1
580
+ logger.debug("Job: get upload mode")
581
+ return (WorkerJob.GET_UPLOAD_MODE, _get_n(status.queue_get_upload_mode, MAX_NB_FILES_FETCH_UPLOAD_MODE))
582
+
583
+ # 10. Preupload LFS file if at least 1 file
584
+ elif status.queue_preupload_lfs.qsize() > 0:
585
+ status.nb_workers_preupload_lfs += 1
586
+ logger.debug("Job: preupload LFS")
587
+ return (WorkerJob.PREUPLOAD_LFS, _get_n(status.queue_preupload_lfs, status.upload_batch_size))
588
+
589
+ # 11. Commit if at least 1 file and 1 min since last commit attempt
590
+ elif (
591
+ status.nb_workers_commit == 0
592
+ and status.queue_commit.qsize() > 0
593
+ and status.last_commit_attempt is not None
594
+ and time.time() - status.last_commit_attempt > 1 * 60
595
+ ):
596
+ status.nb_workers_commit += 1
597
+ logger.debug("Job: commit (1 min since last commit attempt)")
598
+ return (WorkerJob.COMMIT, _get_n(status.queue_commit, status.target_chunk()))
599
+
600
+ # 12. Commit if at least 1 file, all other queues are empty and all workers are waiting
601
+ # e.g. when it's the last commit
602
+ elif (
603
+ status.nb_workers_commit == 0
604
+ and status.queue_commit.qsize() > 0
605
+ and status.queue_sha256.qsize() == 0
606
+ and status.queue_get_upload_mode.qsize() == 0
607
+ and status.queue_preupload_lfs.qsize() == 0
608
+ and status.nb_workers_sha256 == 0
609
+ and status.nb_workers_get_upload_mode == 0
610
+ and status.nb_workers_preupload_lfs == 0
611
+ ):
612
+ status.nb_workers_commit += 1
613
+ logger.debug("Job: commit")
614
+ return (WorkerJob.COMMIT, _get_n(status.queue_commit, status.target_chunk()))
615
+
616
+ # 13. If all queues are empty, exit
617
+ elif all(metadata.is_committed or metadata.should_ignore for _, metadata in status.items):
618
+ logger.info("All files have been processed! Exiting worker.")
619
+ return None
620
+
621
+ # 14. If no task is available, wait
622
+ else:
623
+ status.nb_workers_waiting += 1
624
+ logger.debug(f"No task available, waiting... ({WAITING_TIME_IF_NO_TASKS}s)")
625
+ return (WorkerJob.WAIT, [])
626
+
627
+
628
+ ####################
629
+ # Atomic jobs (sha256, get_upload_mode, preupload_lfs, commit)
630
+ ####################
631
+
632
+
633
+ def _compute_sha256(item: JOB_ITEM_T) -> None:
634
+ """Compute sha256 of a file and save it in metadata."""
635
+ paths, metadata = item
636
+ if metadata.sha256 is None:
637
+ with paths.file_path.open("rb") as f:
638
+ metadata.sha256 = sha_fileobj(f).hex()
639
+ metadata.save(paths)
640
+
641
+
642
+ def _get_upload_mode(items: List[JOB_ITEM_T], api: "HfApi", repo_id: str, repo_type: str, revision: str) -> None:
643
+ """Get upload mode for each file and update metadata.
644
+
645
+ Also receive info if the file should be ignored.
646
+ """
647
+ additions = [_build_hacky_operation(item) for item in items]
648
+ _fetch_upload_modes(
649
+ additions=additions,
650
+ repo_type=repo_type,
651
+ repo_id=repo_id,
652
+ headers=api._build_hf_headers(),
653
+ revision=quote(revision, safe=""),
654
+ endpoint=api.endpoint,
655
+ )
656
+ for item, addition in zip(items, additions):
657
+ paths, metadata = item
658
+ metadata.upload_mode = addition._upload_mode
659
+ metadata.should_ignore = addition._should_ignore
660
+ metadata.remote_oid = addition._remote_oid
661
+ metadata.save(paths)
662
+
663
+
664
+ def _preupload_lfs(items: List[JOB_ITEM_T], api: "HfApi", repo_id: str, repo_type: str, revision: str) -> None:
665
+ """Preupload LFS files and update metadata."""
666
+ additions = [_build_hacky_operation(item) for item in items]
667
+ api.preupload_lfs_files(
668
+ repo_id=repo_id,
669
+ repo_type=repo_type,
670
+ revision=revision,
671
+ additions=additions,
672
+ )
673
+
674
+ for paths, metadata in items:
675
+ metadata.is_uploaded = True
676
+ metadata.save(paths)
677
+
678
+
679
+ def _commit(items: List[JOB_ITEM_T], api: "HfApi", repo_id: str, repo_type: str, revision: str) -> None:
680
+ """Commit files to the repo."""
681
+ additions = [_build_hacky_operation(item) for item in items]
682
+ api.create_commit(
683
+ repo_id=repo_id,
684
+ repo_type=repo_type,
685
+ revision=revision,
686
+ operations=additions,
687
+ commit_message="Add files using upload-large-folder tool",
688
+ )
689
+ for paths, metadata in items:
690
+ metadata.is_committed = True
691
+ metadata.save(paths)
692
+
693
+
694
+ ####################
695
+ # Hacks with CommitOperationAdd to bypass checks/sha256 calculation
696
+ ####################
697
+
698
+
699
+ class HackyCommitOperationAdd(CommitOperationAdd):
700
+ def __post_init__(self) -> None:
701
+ if isinstance(self.path_or_fileobj, Path):
702
+ self.path_or_fileobj = str(self.path_or_fileobj)
703
+
704
+
705
+ def _build_hacky_operation(item: JOB_ITEM_T) -> HackyCommitOperationAdd:
706
+ paths, metadata = item
707
+ operation = HackyCommitOperationAdd(path_in_repo=paths.path_in_repo, path_or_fileobj=paths.file_path)
708
+ with paths.file_path.open("rb") as file:
709
+ sample = file.peek(512)[:512]
710
+ if metadata.sha256 is None:
711
+ raise ValueError("sha256 must have been computed by now!")
712
+ operation.upload_info = UploadInfo(sha256=bytes.fromhex(metadata.sha256), size=metadata.size, sample=sample)
713
+ operation._upload_mode = metadata.upload_mode # type: ignore[assignment]
714
+ operation._should_ignore = metadata.should_ignore
715
+ operation._remote_oid = metadata.remote_oid
716
+ return operation
717
+
718
+
719
+ ####################
720
+ # Misc helpers
721
+ ####################
722
+
723
+
724
+ def _get_one(queue: "queue.Queue[JOB_ITEM_T]") -> List[JOB_ITEM_T]:
725
+ return [queue.get()]
726
+
727
+
728
+ def _get_n(queue: "queue.Queue[JOB_ITEM_T]", n: int) -> List[JOB_ITEM_T]:
729
+ return [queue.get() for _ in range(min(queue.qsize(), n))]
730
+
731
+
732
+ def _print_overwrite(report: str) -> None:
733
+ """Print a report, overwriting the previous lines.
734
+
735
+ Since tqdm is using `sys.stderr` to (re-)write progress bars, we need to use `sys.stdout`
736
+ to print the report.
737
+
738
+ Note: works well only if no other process is writing to `sys.stdout`!
739
+ """
740
+ report += "\n"
741
+ # Get terminal width
742
+ terminal_width = shutil.get_terminal_size().columns
743
+
744
+ # Count number of lines that should be cleared
745
+ nb_lines = sum(len(line) // terminal_width + 1 for line in report.splitlines())
746
+
747
+ # Clear previous lines based on the number of lines in the report
748
+ for _ in range(nb_lines):
749
+ sys.stdout.write("\r\033[K") # Clear line
750
+ sys.stdout.write("\033[F") # Move cursor up one line
751
+
752
+ # Print the new report, filling remaining space with whitespace
753
+ sys.stdout.write(report)
754
+ sys.stdout.write(" " * (terminal_width - len(report.splitlines()[-1])))
755
+ sys.stdout.flush()
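`upload_large_folder_internal` is normally reached through the public `HfApi.upload_large_folder` method rather than called directly. A minimal usage sketch, with an illustrative repo id and local folder:

```py
from huggingface_hub import HfApi

api = HfApi()
# repo_type is mandatory for large uploads, as enforced at the top of the internal function.
api.upload_large_folder(
    repo_id="username/my-dataset",   # illustrative repo id
    repo_type="dataset",
    folder_path="./data",            # illustrative local folder
    allow_patterns=["*.parquet"],
    num_workers=8,
)
```

The upload is resumable: per-file progress is persisted in the `.cache/huggingface/` metadata files read by `read_upload_metadata`, so re-running the same command skips files that are already uploaded and committed.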
venv/lib/python3.13/site-packages/huggingface_hub/_webhooks_payload.py ADDED
@@ -0,0 +1,137 @@
1
+ # coding=utf-8
2
+ # Copyright 2023-present, the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Contains data structures to parse the webhooks payload."""
16
+
17
+ from typing import List, Literal, Optional
18
+
19
+ from .utils import is_pydantic_available
20
+
21
+
22
+ if is_pydantic_available():
23
+ from pydantic import BaseModel
24
+ else:
25
+ # Define a dummy BaseModel to avoid import errors when pydantic is not installed
26
+ # Import error will be raised when trying to use the class
27
+
28
+ class BaseModel: # type: ignore [no-redef]
29
+ def __init__(self, *args, **kwargs) -> None:
30
+ raise ImportError(
31
+ "You must have `pydantic` installed to use `WebhookPayload`. This is an optional dependency that"
32
+ " should be installed separately. Please run `pip install --upgrade pydantic` and retry."
33
+ )
34
+
35
+
36
+ # This is an adaptation of the ReportV3 interface implemented in moon-landing. V0, V1 and V2 have been ignored as they
37
+ # are not in use anymore. To be kept in sync when the format is updated in
38
+ # https://github.com/huggingface/moon-landing/blob/main/server/lib/HFWebhooks.ts (internal link).
39
+
40
+
41
+ WebhookEvent_T = Literal[
42
+ "create",
43
+ "delete",
44
+ "move",
45
+ "update",
46
+ ]
47
+ RepoChangeEvent_T = Literal[
48
+ "add",
49
+ "move",
50
+ "remove",
51
+ "update",
52
+ ]
53
+ RepoType_T = Literal[
54
+ "dataset",
55
+ "model",
56
+ "space",
57
+ ]
58
+ DiscussionStatus_T = Literal[
59
+ "closed",
60
+ "draft",
61
+ "open",
62
+ "merged",
63
+ ]
64
+ SupportedWebhookVersion = Literal[3]
65
+
66
+
67
+ class ObjectId(BaseModel):
68
+ id: str
69
+
70
+
71
+ class WebhookPayloadUrl(BaseModel):
72
+ web: str
73
+ api: Optional[str] = None
74
+
75
+
76
+ class WebhookPayloadMovedTo(BaseModel):
77
+ name: str
78
+ owner: ObjectId
79
+
80
+
81
+ class WebhookPayloadWebhook(ObjectId):
82
+ version: SupportedWebhookVersion
83
+
84
+
85
+ class WebhookPayloadEvent(BaseModel):
86
+ action: WebhookEvent_T
87
+ scope: str
88
+
89
+
90
+ class WebhookPayloadDiscussionChanges(BaseModel):
91
+ base: str
92
+ mergeCommitId: Optional[str] = None
93
+
94
+
95
+ class WebhookPayloadComment(ObjectId):
96
+ author: ObjectId
97
+ hidden: bool
98
+ content: Optional[str] = None
99
+ url: WebhookPayloadUrl
100
+
101
+
102
+ class WebhookPayloadDiscussion(ObjectId):
103
+ num: int
104
+ author: ObjectId
105
+ url: WebhookPayloadUrl
106
+ title: str
107
+ isPullRequest: bool
108
+ status: DiscussionStatus_T
109
+ changes: Optional[WebhookPayloadDiscussionChanges] = None
110
+ pinned: Optional[bool] = None
111
+
112
+
113
+ class WebhookPayloadRepo(ObjectId):
114
+ owner: ObjectId
115
+ head_sha: Optional[str] = None
116
+ name: str
117
+ private: bool
118
+ subdomain: Optional[str] = None
119
+ tags: Optional[List[str]] = None
120
+ type: Literal["dataset", "model", "space"]
121
+ url: WebhookPayloadUrl
122
+
123
+
124
+ class WebhookPayloadUpdatedRef(BaseModel):
125
+ ref: str
126
+ oldSha: Optional[str] = None
127
+ newSha: Optional[str] = None
128
+
129
+
130
+ class WebhookPayload(BaseModel):
131
+ event: WebhookPayloadEvent
132
+ repo: WebhookPayloadRepo
133
+ discussion: Optional[WebhookPayloadDiscussion] = None
134
+ comment: Optional[WebhookPayloadComment] = None
135
+ webhook: WebhookPayloadWebhook
136
+ movedTo: Optional[WebhookPayloadMovedTo] = None
137
+ updatedRefs: Optional[List[WebhookPayloadUpdatedRef]] = None
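Since all of the models above are plain Pydantic models, a raw webhook body (a JSON dict) can be validated into a typed object by instantiating `WebhookPayload` directly. A minimal sketch, assuming `pydantic` is installed; every value in the sample dict below is made up for illustration:

```python
from huggingface_hub import WebhookPayload

sample = {
    "event": {"action": "update", "scope": "repo.content"},
    "repo": {
        "id": "0123456789abcdef01234567",            # fake object id
        "owner": {"id": "fedcba9876543210fedcba98"},  # fake object id
        "name": "some-user/some-model",
        "private": False,
        "type": "model",
        "url": {"web": "https://huggingface.co/some-user/some-model"},
    },
    "webhook": {"id": "aaaabbbbccccddddeeeeffff", "version": 3},
}

payload = WebhookPayload(**sample)  # pydantic validates and builds the nested models
print(payload.repo.name, payload.event.action)  # some-user/some-model update
```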
venv/lib/python3.13/site-packages/huggingface_hub/_webhooks_server.py ADDED
@@ -0,0 +1,388 @@
1
+ # coding=utf-8
2
+ # Copyright 2023-present, the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Contains `WebhooksServer` and `webhook_endpoint` to create a webhook server easily."""
16
+
17
+ import atexit
18
+ import inspect
19
+ import os
20
+ from functools import wraps
21
+ from typing import TYPE_CHECKING, Any, Callable, Dict, Optional
22
+
23
+ from .utils import experimental, is_fastapi_available, is_gradio_available
24
+
25
+
26
+ if TYPE_CHECKING:
27
+ import gradio as gr
28
+ from fastapi import Request
29
+
30
+ if is_fastapi_available():
31
+ from fastapi import FastAPI, Request
32
+ from fastapi.responses import JSONResponse
33
+ else:
34
+ # Will fail at runtime if FastAPI is not available
35
+ FastAPI = Request = JSONResponse = None # type: ignore [misc, assignment]
36
+
37
+
38
+ _global_app: Optional["WebhooksServer"] = None
39
+ _is_local = os.environ.get("SPACE_ID") is None
40
+
41
+
42
+ @experimental
43
+ class WebhooksServer:
44
+ """
45
+ The [`WebhooksServer`] class lets you create an instance of a Gradio app that can receive Huggingface webhooks.
46
+ These webhooks can be registered using the [`~WebhooksServer.add_webhook`] decorator. Webhook endpoints are added to
47
+     the app as POST endpoints on the FastAPI router. Once all the webhooks are registered, the `launch` method has to be
48
+ called to start the app.
49
+
50
+ It is recommended to accept [`WebhookPayload`] as the first argument of the webhook function. It is a Pydantic
51
+ model that contains all the information about the webhook event. The data will be parsed automatically for you.
52
+
53
+     Check out the [webhooks guide](../guides/webhooks_server) for a step-by-step tutorial on how to set up your
54
+ WebhooksServer and deploy it on a Space.
55
+
56
+ <Tip warning={true}>
57
+
58
+ `WebhooksServer` is experimental. Its API is subject to change in the future.
59
+
60
+ </Tip>
61
+
62
+ <Tip warning={true}>
63
+
64
+ You must have `gradio` installed to use `WebhooksServer` (`pip install --upgrade gradio`).
65
+
66
+ </Tip>
67
+
68
+ Args:
69
+ ui (`gradio.Blocks`, optional):
70
+ A Gradio UI instance to be used as the Space landing page. If `None`, a UI displaying instructions
71
+ about the configured webhooks is created.
72
+ webhook_secret (`str`, optional):
73
+ A secret key to verify incoming webhook requests. You can set this value to any secret you want as long as
74
+ you also configure it in your [webhooks settings panel](https://huggingface.co/settings/webhooks). You
75
+ can also set this value as the `WEBHOOK_SECRET` environment variable. If no secret is provided, the
76
+ webhook endpoints are opened without any security.
77
+
78
+ Example:
79
+
80
+ ```python
81
+ import gradio as gr
82
+ from huggingface_hub import WebhooksServer, WebhookPayload
83
+
84
+ with gr.Blocks() as ui:
85
+ ...
86
+
87
+ app = WebhooksServer(ui=ui, webhook_secret="my_secret_key")
88
+
89
+ @app.add_webhook("/say_hello")
90
+ async def hello(payload: WebhookPayload):
91
+ return {"message": "hello"}
92
+
93
+ app.launch()
94
+ ```
95
+ """
96
+
97
+ def __new__(cls, *args, **kwargs) -> "WebhooksServer":
98
+ if not is_gradio_available():
99
+ raise ImportError(
100
+ "You must have `gradio` installed to use `WebhooksServer`. Please run `pip install --upgrade gradio`"
101
+ " first."
102
+ )
103
+ if not is_fastapi_available():
104
+ raise ImportError(
105
+ "You must have `fastapi` installed to use `WebhooksServer`. Please run `pip install --upgrade fastapi`"
106
+ " first."
107
+ )
108
+ return super().__new__(cls)
109
+
110
+ def __init__(
111
+ self,
112
+ ui: Optional["gr.Blocks"] = None,
113
+ webhook_secret: Optional[str] = None,
114
+ ) -> None:
115
+ self._ui = ui
116
+
117
+ self.webhook_secret = webhook_secret or os.getenv("WEBHOOK_SECRET")
118
+ self.registered_webhooks: Dict[str, Callable] = {}
119
+ _warn_on_empty_secret(self.webhook_secret)
120
+
121
+ def add_webhook(self, path: Optional[str] = None) -> Callable:
122
+ """
123
+ Decorator to add a webhook to the [`WebhooksServer`] server.
124
+
125
+ Args:
126
+ path (`str`, optional):
127
+ The URL path to register the webhook function. If not provided, the function name will be used as the
128
+ path. In any case, all webhooks are registered under `/webhooks`.
129
+
130
+ Raises:
131
+ ValueError: If the provided path is already registered as a webhook.
132
+
133
+ Example:
134
+ ```python
135
+ from huggingface_hub import WebhooksServer, WebhookPayload
136
+
137
+ app = WebhooksServer()
138
+
139
+ @app.add_webhook
140
+ async def trigger_training(payload: WebhookPayload):
141
+ if payload.repo.type == "dataset" and payload.event.action == "update":
142
+ # Trigger a training job if a dataset is updated
143
+ ...
144
+
145
+ app.launch()
146
+ ```
147
+ """
148
+ # Usage: directly as decorator. Example: `@app.add_webhook`
149
+ if callable(path):
150
+ # If path is a function, it means it was used as a decorator without arguments
151
+ return self.add_webhook()(path)
152
+
153
+ # Usage: provide a path. Example: `@app.add_webhook(...)`
154
+ @wraps(FastAPI.post)
155
+ def _inner_post(*args, **kwargs):
156
+ func = args[0]
157
+ abs_path = f"/webhooks/{(path or func.__name__).strip('/')}"
158
+ if abs_path in self.registered_webhooks:
159
+ raise ValueError(f"Webhook {abs_path} already exists.")
160
+ self.registered_webhooks[abs_path] = func
161
+
162
+ return _inner_post
163
+
164
+ def launch(self, prevent_thread_lock: bool = False, **launch_kwargs: Any) -> None:
165
+ """Launch the Gradio app and register webhooks to the underlying FastAPI server.
166
+
167
+ Input parameters are forwarded to Gradio when launching the app.
168
+ """
169
+ ui = self._ui or self._get_default_ui()
170
+
171
+ # Start Gradio App
172
+ # - as non-blocking so that webhooks can be added afterwards
173
+ # - as shared if launch locally (to debug webhooks)
174
+ launch_kwargs.setdefault("share", _is_local)
175
+ self.fastapi_app, _, _ = ui.launch(prevent_thread_lock=True, **launch_kwargs)
176
+
177
+ # Register webhooks to FastAPI app
178
+ for path, func in self.registered_webhooks.items():
179
+ # Add secret check if required
180
+ if self.webhook_secret is not None:
181
+ func = _wrap_webhook_to_check_secret(func, webhook_secret=self.webhook_secret)
182
+
183
+ # Add route to FastAPI app
184
+ self.fastapi_app.post(path)(func)
185
+
186
+ # Print instructions and block main thread
187
+ space_host = os.environ.get("SPACE_HOST")
188
+ url = "https://" + space_host if space_host is not None else (ui.share_url or ui.local_url)
189
+ if url is None:
190
+ raise ValueError("Cannot find the URL of the app. Please provide a valid `ui` or update `gradio` version.")
191
+ url = url.strip("/")
192
+ message = "\nWebhooks are correctly setup and ready to use:"
193
+ message += "\n" + "\n".join(f" - POST {url}{webhook}" for webhook in self.registered_webhooks)
194
+ message += "\nGo to https://huggingface.co/settings/webhooks to setup your webhooks."
195
+ print(message)
196
+
197
+ if not prevent_thread_lock:
198
+ ui.block_thread()
199
+
200
+ def _get_default_ui(self) -> "gr.Blocks":
201
+ """Default UI if not provided (lists webhooks and provides basic instructions)."""
202
+ import gradio as gr
203
+
204
+ with gr.Blocks() as ui:
205
+ gr.Markdown("# This is an app to process 🤗 Webhooks")
206
+ gr.Markdown(
207
+ "Webhooks are a foundation for MLOps-related features. They allow you to listen for new changes on"
208
+                 " specific repos or to all repos belonging to a particular set of users/organizations (not just your"
209
+ " repos, but any repo). Check out this [guide](https://huggingface.co/docs/hub/webhooks) to get to"
210
+ " know more about webhooks on the Huggingface Hub."
211
+ )
212
+ gr.Markdown(
213
+ f"{len(self.registered_webhooks)} webhook(s) are registered:"
214
+ + "\n\n"
215
+ + "\n ".join(
216
+ f"- [{webhook_path}]({_get_webhook_doc_url(webhook.__name__, webhook_path)})"
217
+ for webhook_path, webhook in self.registered_webhooks.items()
218
+ )
219
+ )
220
+ gr.Markdown(
221
+ "Go to https://huggingface.co/settings/webhooks to setup your webhooks."
222
+                 + "\nYour app is running locally. Please look at the logs to check the full URL you need to set."
223
+ if _is_local
224
+ else (
225
+ "\nThis app is running on a Space. You can find the corresponding URL in the options menu"
226
+ " (top-right) > 'Embed the Space'. The URL looks like 'https://{username}-{repo_name}.hf.space'."
227
+ )
228
+ )
229
+ return ui
230
+
231
+
232
+ @experimental
233
+ def webhook_endpoint(path: Optional[str] = None) -> Callable:
234
+ """Decorator to start a [`WebhooksServer`] and register the decorated function as a webhook endpoint.
235
+
236
+ This is a helper to get started quickly. If you need more flexibility (custom landing page or webhook secret),
237
+ you can use [`WebhooksServer`] directly. You can register multiple webhook endpoints (to the same server) by using
238
+ this decorator multiple times.
239
+
240
+     Check out the [webhooks guide](../guides/webhooks_server) for a step-by-step tutorial on how to set up your
241
+ server and deploy it on a Space.
242
+
243
+ <Tip warning={true}>
244
+
245
+ `webhook_endpoint` is experimental. Its API is subject to change in the future.
246
+
247
+ </Tip>
248
+
249
+ <Tip warning={true}>
250
+
251
+ You must have `gradio` installed to use `webhook_endpoint` (`pip install --upgrade gradio`).
252
+
253
+ </Tip>
254
+
255
+ Args:
256
+ path (`str`, optional):
257
+ The URL path to register the webhook function. If not provided, the function name will be used as the path.
258
+ In any case, all webhooks are registered under `/webhooks`.
259
+
260
+ Examples:
261
+ The default usage is to register a function as a webhook endpoint. The function name will be used as the path.
262
+ The server will be started automatically at exit (i.e. at the end of the script).
263
+
264
+ ```python
265
+ from huggingface_hub import webhook_endpoint, WebhookPayload
266
+
267
+ @webhook_endpoint
268
+ async def trigger_training(payload: WebhookPayload):
269
+ if payload.repo.type == "dataset" and payload.event.action == "update":
270
+ # Trigger a training job if a dataset is updated
271
+ ...
272
+
273
+ # Server is automatically started at the end of the script.
274
+ ```
275
+
276
+ Advanced usage: register a function as a webhook endpoint and start the server manually. This is useful if you
277
+ are running it in a notebook.
278
+
279
+ ```python
280
+ from huggingface_hub import webhook_endpoint, WebhookPayload
281
+
282
+ @webhook_endpoint
283
+ async def trigger_training(payload: WebhookPayload):
284
+ if payload.repo.type == "dataset" and payload.event.action == "update":
285
+ # Trigger a training job if a dataset is updated
286
+ ...
287
+
288
+ # Start the server manually
289
+ trigger_training.launch()
290
+ ```
291
+ """
292
+ if callable(path):
293
+ # If path is a function, it means it was used as a decorator without arguments
294
+ return webhook_endpoint()(path)
295
+
296
+ @wraps(WebhooksServer.add_webhook)
297
+ def _inner(func: Callable) -> Callable:
298
+ app = _get_global_app()
299
+ app.add_webhook(path)(func)
300
+ if len(app.registered_webhooks) == 1:
301
+ # Register `app.launch` to run at exit (only once)
302
+ atexit.register(app.launch)
303
+
304
+ @wraps(app.launch)
305
+ def _launch_now():
306
+         # Run the app directly (without waiting for atexit)
307
+ atexit.unregister(app.launch)
308
+ app.launch()
309
+
310
+ func.launch = _launch_now # type: ignore
311
+ return func
312
+
313
+ return _inner
314
+
315
+
316
+ def _get_global_app() -> WebhooksServer:
317
+ global _global_app
318
+ if _global_app is None:
319
+ _global_app = WebhooksServer()
320
+ return _global_app
321
+
322
+
323
+ def _warn_on_empty_secret(webhook_secret: Optional[str]) -> None:
324
+ if webhook_secret is None:
325
+ print("Webhook secret is not defined. This means your webhook endpoints will be open to everyone.")
326
+ print(
327
+ "To add a secret, set `WEBHOOK_SECRET` as environment variable or pass it at initialization: "
328
+ "\n\t`app = WebhooksServer(webhook_secret='my_secret', ...)`"
329
+ )
330
+ print(
331
+ "For more details about webhook secrets, please refer to"
332
+ " https://huggingface.co/docs/hub/webhooks#webhook-secret."
333
+ )
334
+ else:
335
+ print("Webhook secret is correctly defined.")
336
+
337
+
338
+ def _get_webhook_doc_url(webhook_name: str, webhook_path: str) -> str:
339
+ """Returns the anchor to a given webhook in the docs (experimental)"""
340
+ return "/docs#/default/" + webhook_name + webhook_path.replace("/", "_") + "_post"
341
+
342
+
343
+ def _wrap_webhook_to_check_secret(func: Callable, webhook_secret: str) -> Callable:
344
+ """Wraps a webhook function to check the webhook secret before calling the function.
345
+
346
+     This is a hacky way to add the `request` parameter to the function signature. Since FastAPI relies on route
347
+     parameters to inject values into the function, we need to hack the function signature to retrieve the `Request`
348
+ object (and hence the headers). A far cleaner solution would be to use a middleware. However, since
349
+ `fastapi==0.90.1`, a middleware cannot be added once the app has started. And since the FastAPI app is started by
350
+ Gradio internals (and not by us), we cannot add a middleware.
351
+
352
+ This method is called only when a secret has been defined by the user. If a request is sent without the
353
+ "x-webhook-secret", the function will return a 401 error (unauthorized). If the header is sent but is incorrect,
354
+ the function will return a 403 error (forbidden).
355
+
356
+ Inspired by https://stackoverflow.com/a/33112180.
357
+ """
358
+ initial_sig = inspect.signature(func)
359
+
360
+ @wraps(func)
361
+ async def _protected_func(request: Request, **kwargs):
362
+ request_secret = request.headers.get("x-webhook-secret")
363
+ if request_secret is None:
364
+ return JSONResponse({"error": "x-webhook-secret header not set."}, status_code=401)
365
+ if request_secret != webhook_secret:
366
+ return JSONResponse({"error": "Invalid webhook secret."}, status_code=403)
367
+
368
+ # Inject `request` in kwargs if required
369
+ if "request" in initial_sig.parameters:
370
+ kwargs["request"] = request
371
+
372
+ # Handle both sync and async routes
373
+ if inspect.iscoroutinefunction(func):
374
+ return await func(**kwargs)
375
+ else:
376
+ return func(**kwargs)
377
+
378
+ # Update signature to include request
379
+ if "request" not in initial_sig.parameters:
380
+ _protected_func.__signature__ = initial_sig.replace( # type: ignore
381
+ parameters=(
382
+ inspect.Parameter(name="request", kind=inspect.Parameter.POSITIONAL_OR_KEYWORD, annotation=Request),
383
+ )
384
+ + tuple(initial_sig.parameters.values())
385
+ )
386
+
387
+ # Return protected route
388
+ return _protected_func
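When a `webhook_secret` is configured, `_wrap_webhook_to_check_secret` above rejects any request whose `x-webhook-secret` header is missing (401) or wrong (403) before the webhook function runs. A hypothetical client-side call, assuming `requests` is installed and a webhook was registered under `/webhooks/ping` with no typed payload; the URL, path and secret are placeholders:

```python
import requests

response = requests.post(
    "https://some-user-some-space.hf.space/webhooks/ping",  # placeholder Space URL
    headers={"x-webhook-secret": "my_secret_key"},           # must match the server-side secret
    json={},                                                  # body is ignored by an endpoint that declares no payload
)
print(response.status_code)  # 401 if the header is missing, 403 if the secret is wrong
```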
venv/lib/python3.13/site-packages/huggingface_hub/community.py ADDED
@@ -0,0 +1,355 @@
1
+ """
2
+ Data structures to interact with Discussions and Pull Requests on the Hub.
3
+
4
+ See [the Discussions and Pull Requests guide](https://huggingface.co/docs/hub/repositories-pull-requests-discussions)
5
+ for more information on Pull Requests, Discussions, and the community tab.
6
+ """
7
+
8
+ from dataclasses import dataclass
9
+ from datetime import datetime
10
+ from typing import List, Literal, Optional, Union
11
+
12
+ from . import constants
13
+ from .utils import parse_datetime
14
+
15
+
16
+ DiscussionStatus = Literal["open", "closed", "merged", "draft"]
17
+
18
+
19
+ @dataclass
20
+ class Discussion:
21
+ """
22
+ A Discussion or Pull Request on the Hub.
23
+
24
+ This dataclass is not intended to be instantiated directly.
25
+
26
+ Attributes:
27
+ title (`str`):
28
+ The title of the Discussion / Pull Request
29
+ status (`str`):
30
+ The status of the Discussion / Pull Request.
31
+ It must be one of:
32
+ * `"open"`
33
+ * `"closed"`
34
+             * `"merged"` (only for Pull Requests)
35
+             * `"draft"` (only for Pull Requests)
36
+ num (`int`):
37
+ The number of the Discussion / Pull Request.
38
+ repo_id (`str`):
39
+ The id (`"{namespace}/{repo_name}"`) of the repo on which
40
+ the Discussion / Pull Request was open.
41
+ repo_type (`str`):
42
+ The type of the repo on which the Discussion / Pull Request was open.
43
+ Possible values are: `"model"`, `"dataset"`, `"space"`.
44
+ author (`str`):
45
+ The username of the Discussion / Pull Request author.
46
+ Can be `"deleted"` if the user has been deleted since.
47
+ is_pull_request (`bool`):
48
+ Whether or not this is a Pull Request.
49
+ created_at (`datetime`):
50
+ The `datetime` of creation of the Discussion / Pull Request.
51
+ endpoint (`str`):
52
+ Endpoint of the Hub. Default is https://huggingface.co.
53
+ git_reference (`str`, *optional*):
54
+ (property) Git reference to which changes can be pushed if this is a Pull Request, `None` otherwise.
55
+ url (`str`):
56
+ (property) URL of the discussion on the Hub.
57
+ """
58
+
59
+ title: str
60
+ status: DiscussionStatus
61
+ num: int
62
+ repo_id: str
63
+ repo_type: str
64
+ author: str
65
+ is_pull_request: bool
66
+ created_at: datetime
67
+ endpoint: str
68
+
69
+ @property
70
+ def git_reference(self) -> Optional[str]:
71
+ """
72
+         If this is a Pull Request, returns the git reference to which changes can be pushed.
73
+ Returns `None` otherwise.
74
+ """
75
+ if self.is_pull_request:
76
+ return f"refs/pr/{self.num}"
77
+ return None
78
+
79
+ @property
80
+ def url(self) -> str:
81
+ """Returns the URL of the discussion on the Hub."""
82
+ if self.repo_type is None or self.repo_type == constants.REPO_TYPE_MODEL:
83
+ return f"{self.endpoint}/{self.repo_id}/discussions/{self.num}"
84
+ return f"{self.endpoint}/{self.repo_type}s/{self.repo_id}/discussions/{self.num}"
85
+
86
+
87
+ @dataclass
88
+ class DiscussionWithDetails(Discussion):
89
+ """
90
+ Subclass of [`Discussion`].
91
+
92
+ Attributes:
93
+ title (`str`):
94
+ The title of the Discussion / Pull Request
95
+ status (`str`):
96
+ The status of the Discussion / Pull Request.
97
+ It can be one of:
98
+ * `"open"`
99
+ * `"closed"`
100
+             * `"merged"` (only for Pull Requests)
101
+             * `"draft"` (only for Pull Requests)
102
+ num (`int`):
103
+ The number of the Discussion / Pull Request.
104
+ repo_id (`str`):
105
+ The id (`"{namespace}/{repo_name}"`) of the repo on which
106
+ the Discussion / Pull Request was open.
107
+ repo_type (`str`):
108
+ The type of the repo on which the Discussion / Pull Request was open.
109
+ Possible values are: `"model"`, `"dataset"`, `"space"`.
110
+ author (`str`):
111
+ The username of the Discussion / Pull Request author.
112
+ Can be `"deleted"` if the user has been deleted since.
113
+ is_pull_request (`bool`):
114
+ Whether or not this is a Pull Request.
115
+ created_at (`datetime`):
116
+ The `datetime` of creation of the Discussion / Pull Request.
117
+         events (`list` of [`DiscussionEvent`]):
118
+             The list of [`DiscussionEvent`] objects in this Discussion or Pull Request.
119
+ conflicting_files (`Union[List[str], bool, None]`, *optional*):
120
+ A list of conflicting files if this is a Pull Request.
121
+ `None` if `self.is_pull_request` is `False`.
122
+ `True` if there are conflicting files but the list can't be retrieved.
123
+ target_branch (`str`, *optional*):
124
+ The branch into which changes are to be merged if this is a
125
+             Pull Request. `None` if `self.is_pull_request` is `False`.
126
+ merge_commit_oid (`str`, *optional*):
127
+             If this is a merged Pull Request, this is set to the OID / SHA of
128
+ the merge commit, `None` otherwise.
129
+ diff (`str`, *optional*):
130
+             The git diff if this is a Pull Request, `None` otherwise.
131
+ endpoint (`str`):
132
+ Endpoint of the Hub. Default is https://huggingface.co.
133
+ git_reference (`str`, *optional*):
134
+ (property) Git reference to which changes can be pushed if this is a Pull Request, `None` otherwise.
135
+ url (`str`):
136
+ (property) URL of the discussion on the Hub.
137
+ """
138
+
139
+ events: List["DiscussionEvent"]
140
+ conflicting_files: Union[List[str], bool, None]
141
+ target_branch: Optional[str]
142
+ merge_commit_oid: Optional[str]
143
+ diff: Optional[str]
144
+
145
+
146
+ @dataclass
147
+ class DiscussionEvent:
148
+ """
149
+ An event in a Discussion or Pull Request.
150
+
151
+ Use concrete classes:
152
+ * [`DiscussionComment`]
153
+ * [`DiscussionStatusChange`]
154
+ * [`DiscussionCommit`]
155
+ * [`DiscussionTitleChange`]
156
+
157
+ Attributes:
158
+ id (`str`):
159
+             The ID of the event. A hexadecimal string.
160
+ type (`str`):
161
+ The type of the event.
162
+ created_at (`datetime`):
163
+ A [`datetime`](https://docs.python.org/3/library/datetime.html?highlight=datetime#datetime.datetime)
164
+ object holding the creation timestamp for the event.
165
+ author (`str`):
166
+ The username of the Discussion / Pull Request author.
167
+ Can be `"deleted"` if the user has been deleted since.
168
+ """
169
+
170
+ id: str
171
+ type: str
172
+ created_at: datetime
173
+ author: str
174
+
175
+ _event: dict
176
+ """Stores the original event data, in case we need to access it later."""
177
+
178
+
179
+ @dataclass
180
+ class DiscussionComment(DiscussionEvent):
181
+ """A comment in a Discussion / Pull Request.
182
+
183
+ Subclass of [`DiscussionEvent`].
184
+
185
+
186
+ Attributes:
187
+ id (`str`):
188
+             The ID of the event. A hexadecimal string.
189
+ type (`str`):
190
+ The type of the event.
191
+ created_at (`datetime`):
192
+ A [`datetime`](https://docs.python.org/3/library/datetime.html?highlight=datetime#datetime.datetime)
193
+ object holding the creation timestamp for the event.
194
+ author (`str`):
195
+ The username of the Discussion / Pull Request author.
196
+ Can be `"deleted"` if the user has been deleted since.
197
+ content (`str`):
198
+ The raw markdown content of the comment. Mentions, links and images are not rendered.
199
+ edited (`bool`):
200
+ Whether or not this comment has been edited.
201
+ hidden (`bool`):
202
+ Whether or not this comment has been hidden.
203
+ """
204
+
205
+ content: str
206
+ edited: bool
207
+ hidden: bool
208
+
209
+ @property
210
+ def rendered(self) -> str:
211
+ """The rendered comment, as a HTML string"""
212
+ return self._event["data"]["latest"]["html"]
213
+
214
+ @property
215
+ def last_edited_at(self) -> datetime:
216
+ """The last edit time, as a `datetime` object."""
217
+ return parse_datetime(self._event["data"]["latest"]["updatedAt"])
218
+
219
+ @property
220
+ def last_edited_by(self) -> str:
221
+         """The username of the user who last edited the comment ("deleted" if the user has been deleted since)."""
222
+ return self._event["data"]["latest"].get("author", {}).get("name", "deleted")
223
+
224
+ @property
225
+ def edit_history(self) -> List[dict]:
226
+ """The edit history of the comment"""
227
+ return self._event["data"]["history"]
228
+
229
+ @property
230
+ def number_of_edits(self) -> int:
231
+ return len(self.edit_history)
232
+
233
+
234
+ @dataclass
235
+ class DiscussionStatusChange(DiscussionEvent):
236
+ """A change of status in a Discussion / Pull Request.
237
+
238
+ Subclass of [`DiscussionEvent`].
239
+
240
+ Attributes:
241
+ id (`str`):
242
+             The ID of the event. A hexadecimal string.
243
+ type (`str`):
244
+ The type of the event.
245
+ created_at (`datetime`):
246
+ A [`datetime`](https://docs.python.org/3/library/datetime.html?highlight=datetime#datetime.datetime)
247
+ object holding the creation timestamp for the event.
248
+ author (`str`):
249
+ The username of the Discussion / Pull Request author.
250
+ Can be `"deleted"` if the user has been deleted since.
251
+ new_status (`str`):
252
+ The status of the Discussion / Pull Request after the change.
253
+ It can be one of:
254
+ * `"open"`
255
+ * `"closed"`
256
+             * `"merged"` (only for Pull Requests)
257
+ """
258
+
259
+ new_status: str
260
+
261
+
262
+ @dataclass
263
+ class DiscussionCommit(DiscussionEvent):
264
+ """A commit in a Pull Request.
265
+
266
+ Subclass of [`DiscussionEvent`].
267
+
268
+ Attributes:
269
+ id (`str`):
270
+             The ID of the event. A hexadecimal string.
271
+ type (`str`):
272
+ The type of the event.
273
+ created_at (`datetime`):
274
+ A [`datetime`](https://docs.python.org/3/library/datetime.html?highlight=datetime#datetime.datetime)
275
+ object holding the creation timestamp for the event.
276
+ author (`str`):
277
+ The username of the Discussion / Pull Request author.
278
+ Can be `"deleted"` if the user has been deleted since.
279
+ summary (`str`):
280
+ The summary of the commit.
281
+ oid (`str`):
282
+ The OID / SHA of the commit, as a hexadecimal string.
283
+ """
284
+
285
+ summary: str
286
+ oid: str
287
+
288
+
289
+ @dataclass
290
+ class DiscussionTitleChange(DiscussionEvent):
291
+ """A rename event in a Discussion / Pull Request.
292
+
293
+ Subclass of [`DiscussionEvent`].
294
+
295
+ Attributes:
296
+ id (`str`):
297
+             The ID of the event. A hexadecimal string.
298
+ type (`str`):
299
+ The type of the event.
300
+ created_at (`datetime`):
301
+ A [`datetime`](https://docs.python.org/3/library/datetime.html?highlight=datetime#datetime.datetime)
302
+ object holding the creation timestamp for the event.
303
+ author (`str`):
304
+ The username of the Discussion / Pull Request author.
305
+ Can be `"deleted"` if the user has been deleted since.
306
+ old_title (`str`):
307
+ The previous title for the Discussion / Pull Request.
308
+ new_title (`str`):
309
+ The new title.
310
+ """
311
+
312
+ old_title: str
313
+ new_title: str
314
+
315
+
316
+ def deserialize_event(event: dict) -> DiscussionEvent:
317
+ """Instantiates a [`DiscussionEvent`] from a dict"""
318
+ event_id: str = event["id"]
319
+ event_type: str = event["type"]
320
+ created_at = parse_datetime(event["createdAt"])
321
+
322
+ common_args = dict(
323
+ id=event_id,
324
+ type=event_type,
325
+ created_at=created_at,
326
+ author=event.get("author", {}).get("name", "deleted"),
327
+ _event=event,
328
+ )
329
+
330
+ if event_type == "comment":
331
+ return DiscussionComment(
332
+ **common_args,
333
+ edited=event["data"]["edited"],
334
+ hidden=event["data"]["hidden"],
335
+ content=event["data"]["latest"]["raw"],
336
+ )
337
+ if event_type == "status-change":
338
+ return DiscussionStatusChange(
339
+ **common_args,
340
+ new_status=event["data"]["status"],
341
+ )
342
+ if event_type == "commit":
343
+ return DiscussionCommit(
344
+ **common_args,
345
+ summary=event["data"]["subject"],
346
+ oid=event["data"]["oid"],
347
+ )
348
+ if event_type == "title-change":
349
+ return DiscussionTitleChange(
350
+ **common_args,
351
+ old_title=event["data"]["from"],
352
+ new_title=event["data"]["to"],
353
+ )
354
+
355
+ return DiscussionEvent(**common_args)
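`deserialize_event` dispatches on the `type` key and falls back to a generic `DiscussionEvent` for unknown event types. A small sketch with a made-up comment event; the timestamp follows the ISO format returned by the Hub API and all other values are illustrative:

```python
from huggingface_hub.community import deserialize_event

raw_event = {
    "id": "deadbeef0123",                      # fake event id
    "type": "comment",
    "createdAt": "2023-05-11T12:34:56.000Z",
    "author": {"name": "some-user"},
    "data": {"edited": False, "hidden": False, "latest": {"raw": "Nice work!"}},
}

event = deserialize_event(raw_event)
print(type(event).__name__, event.content)  # DiscussionComment Nice work!
```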
venv/lib/python3.13/site-packages/huggingface_hub/constants.py ADDED
@@ -0,0 +1,294 @@
1
+ import os
2
+ import re
3
+ import typing
4
+ from typing import Literal, Optional, Tuple
5
+
6
+
7
+ # Possible values for env variables
8
+
9
+
10
+ ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"}
11
+ ENV_VARS_TRUE_AND_AUTO_VALUES = ENV_VARS_TRUE_VALUES.union({"AUTO"})
12
+
13
+
14
+ def _is_true(value: Optional[str]) -> bool:
15
+ if value is None:
16
+ return False
17
+ return value.upper() in ENV_VARS_TRUE_VALUES
18
+
19
+
20
+ def _as_int(value: Optional[str]) -> Optional[int]:
21
+ if value is None:
22
+ return None
23
+ return int(value)
24
+
25
+
26
+ # Constants for file downloads
27
+
28
+ PYTORCH_WEIGHTS_NAME = "pytorch_model.bin"
29
+ TF2_WEIGHTS_NAME = "tf_model.h5"
30
+ TF_WEIGHTS_NAME = "model.ckpt"
31
+ FLAX_WEIGHTS_NAME = "flax_model.msgpack"
32
+ CONFIG_NAME = "config.json"
33
+ REPOCARD_NAME = "README.md"
34
+ DEFAULT_ETAG_TIMEOUT = 10
35
+ DEFAULT_DOWNLOAD_TIMEOUT = 10
36
+ DEFAULT_REQUEST_TIMEOUT = 10
37
+ DOWNLOAD_CHUNK_SIZE = 10 * 1024 * 1024
38
+ HF_TRANSFER_CONCURRENCY = 100
39
+ MAX_HTTP_DOWNLOAD_SIZE = 50 * 1000 * 1000 * 1000 # 50 GB
40
+
41
+ # Constants for serialization
42
+
43
+ PYTORCH_WEIGHTS_FILE_PATTERN = "pytorch_model{suffix}.bin" # Unsafe pickle: use safetensors instead
44
+ SAFETENSORS_WEIGHTS_FILE_PATTERN = "model{suffix}.safetensors"
45
+ TF2_WEIGHTS_FILE_PATTERN = "tf_model{suffix}.h5"
46
+
47
+ # Constants for safetensors repos
48
+
49
+ SAFETENSORS_SINGLE_FILE = "model.safetensors"
50
+ SAFETENSORS_INDEX_FILE = "model.safetensors.index.json"
51
+ SAFETENSORS_MAX_HEADER_LENGTH = 25_000_000
52
+
53
+ # Timeout of acquiring file lock and logging the attempt
54
+ FILELOCK_LOG_EVERY_SECONDS = 10
55
+
56
+ # Git-related constants
57
+
58
+ DEFAULT_REVISION = "main"
59
+ REGEX_COMMIT_OID = re.compile(r"[A-Fa-f0-9]{5,40}")
60
+
61
+ HUGGINGFACE_CO_URL_HOME = "https://huggingface.co/"
62
+
63
+ _staging_mode = _is_true(os.environ.get("HUGGINGFACE_CO_STAGING"))
64
+
65
+ _HF_DEFAULT_ENDPOINT = "https://huggingface.co"
66
+ _HF_DEFAULT_STAGING_ENDPOINT = "https://hub-ci.huggingface.co"
67
+ ENDPOINT = os.getenv("HF_ENDPOINT", _HF_DEFAULT_ENDPOINT).rstrip("/")
68
+ HUGGINGFACE_CO_URL_TEMPLATE = ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
69
+
70
+ if _staging_mode:
71
+ ENDPOINT = _HF_DEFAULT_STAGING_ENDPOINT
72
+ HUGGINGFACE_CO_URL_TEMPLATE = _HF_DEFAULT_STAGING_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
73
+
74
+ HUGGINGFACE_HEADER_X_REPO_COMMIT = "X-Repo-Commit"
75
+ HUGGINGFACE_HEADER_X_LINKED_ETAG = "X-Linked-Etag"
76
+ HUGGINGFACE_HEADER_X_LINKED_SIZE = "X-Linked-Size"
77
+ HUGGINGFACE_HEADER_X_BILL_TO = "X-HF-Bill-To"
78
+
79
+ INFERENCE_ENDPOINT = os.environ.get("HF_INFERENCE_ENDPOINT", "https://api-inference.huggingface.co")
80
+
81
+ # See https://huggingface.co/docs/inference-endpoints/index
82
+ INFERENCE_ENDPOINTS_ENDPOINT = "https://api.endpoints.huggingface.cloud/v2"
83
+ INFERENCE_CATALOG_ENDPOINT = "https://endpoints.huggingface.co/api/catalog"
84
+
85
+ # See https://api.endpoints.huggingface.cloud/#post-/v2/endpoint/-namespace-
86
+ INFERENCE_ENDPOINT_IMAGE_KEYS = [
87
+ "custom",
88
+ "huggingface",
89
+ "huggingfaceNeuron",
90
+ "llamacpp",
91
+ "tei",
92
+ "tgi",
93
+ "tgiNeuron",
94
+ ]
95
+
96
+ # Proxy for third-party providers
97
+ INFERENCE_PROXY_TEMPLATE = "https://router.huggingface.co/{provider}"
98
+
99
+ REPO_ID_SEPARATOR = "--"
100
+ # ^ this substring is not allowed in repo_ids on hf.co
101
+ # and is the canonical one we use for serialization of repo ids elsewhere.
102
+
103
+
104
+ REPO_TYPE_DATASET = "dataset"
105
+ REPO_TYPE_SPACE = "space"
106
+ REPO_TYPE_MODEL = "model"
107
+ REPO_TYPES = [None, REPO_TYPE_MODEL, REPO_TYPE_DATASET, REPO_TYPE_SPACE]
108
+ SPACES_SDK_TYPES = ["gradio", "streamlit", "docker", "static"]
109
+
110
+ REPO_TYPES_URL_PREFIXES = {
111
+ REPO_TYPE_DATASET: "datasets/",
112
+ REPO_TYPE_SPACE: "spaces/",
113
+ }
114
+ REPO_TYPES_MAPPING = {
115
+ "datasets": REPO_TYPE_DATASET,
116
+ "spaces": REPO_TYPE_SPACE,
117
+ "models": REPO_TYPE_MODEL,
118
+ }
119
+
120
+ DiscussionTypeFilter = Literal["all", "discussion", "pull_request"]
121
+ DISCUSSION_TYPES: Tuple[DiscussionTypeFilter, ...] = typing.get_args(DiscussionTypeFilter)
122
+ DiscussionStatusFilter = Literal["all", "open", "closed"]
123
+ DISCUSSION_STATUS: Tuple[DiscussionStatusFilter, ...] = typing.get_args(DiscussionStatusFilter)
124
+
125
+ # Webhook subscription types
126
+ WEBHOOK_DOMAIN_T = Literal["repo", "discussions"]
127
+
128
+ # default cache
129
+ default_home = os.path.join(os.path.expanduser("~"), ".cache")
130
+ HF_HOME = os.path.expandvars(
131
+ os.path.expanduser(
132
+ os.getenv(
133
+ "HF_HOME",
134
+ os.path.join(os.getenv("XDG_CACHE_HOME", default_home), "huggingface"),
135
+ )
136
+ )
137
+ )
138
+ hf_cache_home = HF_HOME # for backward compatibility. TODO: remove this in 1.0.0
139
+
140
+ default_cache_path = os.path.join(HF_HOME, "hub")
141
+ default_assets_cache_path = os.path.join(HF_HOME, "assets")
142
+
143
+ # Legacy env variables
144
+ HUGGINGFACE_HUB_CACHE = os.getenv("HUGGINGFACE_HUB_CACHE", default_cache_path)
145
+ HUGGINGFACE_ASSETS_CACHE = os.getenv("HUGGINGFACE_ASSETS_CACHE", default_assets_cache_path)
146
+
147
+ # New env variables
148
+ HF_HUB_CACHE = os.path.expandvars(
149
+ os.path.expanduser(
150
+ os.getenv(
151
+ "HF_HUB_CACHE",
152
+ HUGGINGFACE_HUB_CACHE,
153
+ )
154
+ )
155
+ )
156
+ HF_ASSETS_CACHE = os.path.expandvars(
157
+ os.path.expanduser(
158
+ os.getenv(
159
+ "HF_ASSETS_CACHE",
160
+ HUGGINGFACE_ASSETS_CACHE,
161
+ )
162
+ )
163
+ )
164
+
165
+ HF_HUB_OFFLINE = _is_true(os.environ.get("HF_HUB_OFFLINE") or os.environ.get("TRANSFORMERS_OFFLINE"))
166
+
167
+ # If set, log level will be set to DEBUG and all requests made to the Hub will be logged
168
+ # as curl commands for reproducibility.
169
+ HF_DEBUG = _is_true(os.environ.get("HF_DEBUG"))
170
+
171
+ # Opt-out from telemetry requests
172
+ HF_HUB_DISABLE_TELEMETRY = (
173
+ _is_true(os.environ.get("HF_HUB_DISABLE_TELEMETRY")) # HF-specific env variable
174
+ or _is_true(os.environ.get("DISABLE_TELEMETRY"))
175
+ or _is_true(os.environ.get("DO_NOT_TRACK")) # https://consoledonottrack.com/
176
+ )
177
+
178
+ HF_TOKEN_PATH = os.path.expandvars(
179
+ os.path.expanduser(
180
+ os.getenv(
181
+ "HF_TOKEN_PATH",
182
+ os.path.join(HF_HOME, "token"),
183
+ )
184
+ )
185
+ )
186
+ HF_STORED_TOKENS_PATH = os.path.join(os.path.dirname(HF_TOKEN_PATH), "stored_tokens")
187
+
188
+ if _staging_mode:
189
+ # In staging mode, we use a different cache to ensure we don't mix up production and staging data or tokens
190
+ # In practice in `huggingface_hub` tests, we monkeypatch these values with temporary directories. The following
191
+ # lines are only used in third-party libraries tests (e.g. `transformers`, `diffusers`, etc.).
192
+ _staging_home = os.path.join(os.path.expanduser("~"), ".cache", "huggingface_staging")
193
+ HUGGINGFACE_HUB_CACHE = os.path.join(_staging_home, "hub")
194
+ HF_TOKEN_PATH = os.path.join(_staging_home, "token")
195
+
196
+ # Here, `True` will disable progress bars globally without possibility of enabling it
197
+ # programmatically. `False` will enable them without possibility of disabling them.
198
+ # If environment variable is not set (None), then the user is free to enable/disable
199
+ # them programmatically.
200
+ # TL;DR: env variable has priority over code
201
+ __HF_HUB_DISABLE_PROGRESS_BARS = os.environ.get("HF_HUB_DISABLE_PROGRESS_BARS")
202
+ HF_HUB_DISABLE_PROGRESS_BARS: Optional[bool] = (
203
+ _is_true(__HF_HUB_DISABLE_PROGRESS_BARS) if __HF_HUB_DISABLE_PROGRESS_BARS is not None else None
204
+ )
205
+
206
+ # Disable warning on machines that do not support symlinks (e.g. Windows non-developer)
207
+ HF_HUB_DISABLE_SYMLINKS_WARNING: bool = _is_true(os.environ.get("HF_HUB_DISABLE_SYMLINKS_WARNING"))
208
+
209
+ # Disable warning when using experimental features
210
+ HF_HUB_DISABLE_EXPERIMENTAL_WARNING: bool = _is_true(os.environ.get("HF_HUB_DISABLE_EXPERIMENTAL_WARNING"))
211
+
212
+ # Disable sending the cached token by default in all HTTP requests to the Hub
213
+ HF_HUB_DISABLE_IMPLICIT_TOKEN: bool = _is_true(os.environ.get("HF_HUB_DISABLE_IMPLICIT_TOKEN"))
214
+
215
+ # Enable fast-download using external dependency "hf_transfer"
216
+ # See:
217
+ # - https://pypi.org/project/hf-transfer/
218
+ # - https://github.com/huggingface/hf_transfer (private)
219
+ HF_HUB_ENABLE_HF_TRANSFER: bool = _is_true(os.environ.get("HF_HUB_ENABLE_HF_TRANSFER"))
220
+
221
+
222
+ # UNUSED
223
+ # We don't use symlinks in local dir anymore.
224
+ HF_HUB_LOCAL_DIR_AUTO_SYMLINK_THRESHOLD: int = (
225
+ _as_int(os.environ.get("HF_HUB_LOCAL_DIR_AUTO_SYMLINK_THRESHOLD")) or 5 * 1024 * 1024
226
+ )
227
+
228
+ # Used to override the etag timeout on a system level
229
+ HF_HUB_ETAG_TIMEOUT: int = _as_int(os.environ.get("HF_HUB_ETAG_TIMEOUT")) or DEFAULT_ETAG_TIMEOUT
230
+
231
+ # Used to override the get request timeout on a system level
232
+ HF_HUB_DOWNLOAD_TIMEOUT: int = _as_int(os.environ.get("HF_HUB_DOWNLOAD_TIMEOUT")) or DEFAULT_DOWNLOAD_TIMEOUT
233
+
234
+ # Allows adding information about the requester in the user-agent (e.g. partner name)
235
+ HF_HUB_USER_AGENT_ORIGIN: Optional[str] = os.environ.get("HF_HUB_USER_AGENT_ORIGIN")
236
+
237
+ # List frameworks that are handled by the InferenceAPI service. Useful to scan endpoints and check which models are
238
+ # deployed and running. Since 95% of the models are using the top 4 frameworks listed below, we scan only those by
239
+ # default. We still keep the full list of supported frameworks in case we want to scan all of them.
240
+ MAIN_INFERENCE_API_FRAMEWORKS = [
241
+ "diffusers",
242
+ "sentence-transformers",
243
+ "text-generation-inference",
244
+ "transformers",
245
+ ]
246
+
247
+ ALL_INFERENCE_API_FRAMEWORKS = MAIN_INFERENCE_API_FRAMEWORKS + [
248
+ "adapter-transformers",
249
+ "allennlp",
250
+ "asteroid",
251
+ "bertopic",
252
+ "doctr",
253
+ "espnet",
254
+ "fairseq",
255
+ "fastai",
256
+ "fasttext",
257
+ "flair",
258
+ "k2",
259
+ "keras",
260
+ "mindspore",
261
+ "nemo",
262
+ "open_clip",
263
+ "paddlenlp",
264
+ "peft",
265
+ "pyannote-audio",
266
+ "sklearn",
267
+ "spacy",
268
+ "span-marker",
269
+ "speechbrain",
270
+ "stanza",
271
+ "timm",
272
+ ]
273
+
274
+ # If OAuth didn't work after 2 redirects, there's likely a third-party cookie issue in the Space iframe view.
275
+ # In this case, we redirect the user to the non-iframe view.
276
+ OAUTH_MAX_REDIRECTS = 2
277
+
278
+ # OAuth-related environment variables injected by the Space
279
+ OAUTH_CLIENT_ID = os.environ.get("OAUTH_CLIENT_ID")
280
+ OAUTH_CLIENT_SECRET = os.environ.get("OAUTH_CLIENT_SECRET")
281
+ OAUTH_SCOPES = os.environ.get("OAUTH_SCOPES")
282
+ OPENID_PROVIDER_URL = os.environ.get("OPENID_PROVIDER_URL")
283
+
284
+ # Xet constants
285
+ HUGGINGFACE_HEADER_X_XET_ENDPOINT = "X-Xet-Cas-Url"
286
+ HUGGINGFACE_HEADER_X_XET_ACCESS_TOKEN = "X-Xet-Access-Token"
287
+ HUGGINGFACE_HEADER_X_XET_EXPIRATION = "X-Xet-Token-Expiration"
288
+ HUGGINGFACE_HEADER_X_XET_HASH = "X-Xet-Hash"
289
+ HUGGINGFACE_HEADER_X_XET_REFRESH_ROUTE = "X-Xet-Refresh-Route"
290
+ HUGGINGFACE_HEADER_LINK_XET_AUTH_KEY = "xet-auth"
291
+
292
+ default_xet_cache_path = os.path.join(HF_HOME, "xet")
293
+ HF_XET_CACHE = os.getenv("HF_XET_CACHE", default_xet_cache_path)
294
+ HF_HUB_DISABLE_XET: bool = _is_true(os.environ.get("HF_HUB_DISABLE_XET"))
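All of these constants are resolved once, at import time, from environment variables, so a variable must be set before `huggingface_hub` is imported to have any effect. A small sketch of reading a few of them; the printed defaults assume none of the related environment variables are set:

```python
from huggingface_hub import constants

print(constants.ENDPOINT)        # "https://huggingface.co" unless HF_ENDPOINT was set before import
print(constants.HF_HUB_CACHE)    # defaults to "~/.cache/huggingface/hub" (expanded)
print(constants.HF_HUB_OFFLINE)  # False unless HF_HUB_OFFLINE or TRANSFORMERS_OFFLINE is a truthy value
```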
venv/lib/python3.13/site-packages/huggingface_hub/dataclasses.py ADDED
@@ -0,0 +1,484 @@
1
+ import inspect
2
+ from dataclasses import _MISSING_TYPE, MISSING, Field, field, fields
3
+ from functools import wraps
4
+ from typing import (
5
+ Any,
6
+ Callable,
7
+ Dict,
8
+ ForwardRef,
9
+ List,
10
+ Literal,
11
+ Optional,
12
+ Tuple,
13
+ Type,
14
+ TypeVar,
15
+ Union,
16
+ get_args,
17
+ get_origin,
18
+ overload,
19
+ )
20
+
21
+ from .errors import (
22
+ StrictDataclassClassValidationError,
23
+ StrictDataclassDefinitionError,
24
+ StrictDataclassFieldValidationError,
25
+ )
26
+
27
+
28
+ Validator_T = Callable[[Any], None]
29
+ T = TypeVar("T")
30
+
31
+
32
+ # The overload decorator helps type checkers understand the different return types
33
+ @overload
34
+ def strict(cls: Type[T]) -> Type[T]: ...
35
+
36
+
37
+ @overload
38
+ def strict(*, accept_kwargs: bool = False) -> Callable[[Type[T]], Type[T]]: ...
39
+
40
+
41
+ def strict(
42
+ cls: Optional[Type[T]] = None, *, accept_kwargs: bool = False
43
+ ) -> Union[Type[T], Callable[[Type[T]], Type[T]]]:
44
+ """
45
+ Decorator to add strict validation to a dataclass.
46
+
47
+ This decorator must be used on top of `@dataclass` to ensure IDEs and static typing tools
48
+ recognize the class as a dataclass.
49
+
50
+ Can be used with or without arguments:
51
+ - `@strict`
52
+ - `@strict(accept_kwargs=True)`
53
+
54
+ Args:
55
+ cls:
56
+ The class to convert to a strict dataclass.
57
+ accept_kwargs (`bool`, *optional*):
58
+ If True, allows arbitrary keyword arguments in `__init__`. Defaults to False.
59
+
60
+ Returns:
61
+ The enhanced dataclass with strict validation on field assignment.
62
+
63
+ Example:
64
+ ```py
65
+ >>> from dataclasses import dataclass
66
+ >>> from huggingface_hub.dataclasses import as_validated_field, strict, validated_field
67
+
68
+ >>> @as_validated_field
69
+ >>> def positive_int(value: int):
70
+ ... if not value >= 0:
71
+ ... raise ValueError(f"Value must be positive, got {value}")
72
+
73
+ >>> @strict(accept_kwargs=True)
74
+ ... @dataclass
75
+ ... class User:
76
+ ... name: str
77
+ ... age: int = positive_int(default=10)
78
+
79
+ # Initialize
80
+ >>> User(name="John")
81
+ User(name='John', age=10)
82
+
83
+ # Extra kwargs are accepted
84
+ >>> User(name="John", age=30, lastname="Doe")
85
+ User(name='John', age=30, *lastname='Doe')
86
+
87
+ # Invalid type => raises
88
+ >>> User(name="John", age="30")
89
+ huggingface_hub.errors.StrictDataclassFieldValidationError: Validation error for field 'age':
90
+ TypeError: Field 'age' expected int, got str (value: '30')
91
+
92
+ # Invalid value => raises
93
+ >>> User(name="John", age=-1)
94
+ huggingface_hub.errors.StrictDataclassFieldValidationError: Validation error for field 'age':
95
+ ValueError: Value must be positive, got -1
96
+ ```
97
+ """
98
+
99
+ def wrap(cls: Type[T]) -> Type[T]:
100
+ if not hasattr(cls, "__dataclass_fields__"):
101
+ raise StrictDataclassDefinitionError(
102
+ f"Class '{cls.__name__}' must be a dataclass before applying @strict."
103
+ )
104
+
105
+ # List and store validators
106
+ field_validators: Dict[str, List[Validator_T]] = {}
107
+ for f in fields(cls): # type: ignore [arg-type]
108
+ validators = []
109
+ validators.append(_create_type_validator(f))
110
+ custom_validator = f.metadata.get("validator")
111
+ if custom_validator is not None:
112
+ if not isinstance(custom_validator, list):
113
+ custom_validator = [custom_validator]
114
+ for validator in custom_validator:
115
+ if not _is_validator(validator):
116
+ raise StrictDataclassDefinitionError(
117
+ f"Invalid validator for field '{f.name}': {validator}. Must be a callable taking a single argument."
118
+ )
119
+ validators.extend(custom_validator)
120
+ field_validators[f.name] = validators
121
+ cls.__validators__ = field_validators # type: ignore
122
+
123
+ # Override __setattr__ to validate fields on assignment
124
+ original_setattr = cls.__setattr__
125
+
126
+ def __strict_setattr__(self: Any, name: str, value: Any) -> None:
127
+ """Custom __setattr__ method for strict dataclasses."""
128
+ # Run all validators
129
+ for validator in self.__validators__.get(name, []):
130
+ try:
131
+ validator(value)
132
+ except (ValueError, TypeError) as e:
133
+ raise StrictDataclassFieldValidationError(field=name, cause=e) from e
134
+
135
+ # If validation passed, set the attribute
136
+ original_setattr(self, name, value)
137
+
138
+ cls.__setattr__ = __strict_setattr__ # type: ignore[method-assign]
139
+
140
+ if accept_kwargs:
141
+ # (optional) Override __init__ to accept arbitrary keyword arguments
142
+ original_init = cls.__init__
143
+
144
+ @wraps(original_init)
145
+ def __init__(self, **kwargs: Any) -> None:
146
+ # Extract only the fields that are part of the dataclass
147
+ dataclass_fields = {f.name for f in fields(cls)} # type: ignore [arg-type]
148
+ standard_kwargs = {k: v for k, v in kwargs.items() if k in dataclass_fields}
149
+
150
+ # Call the original __init__ with standard fields
151
+ original_init(self, **standard_kwargs)
152
+
153
+ # Add any additional kwargs as attributes
154
+ for name, value in kwargs.items():
155
+ if name not in dataclass_fields:
156
+ self.__setattr__(name, value)
157
+
158
+ cls.__init__ = __init__ # type: ignore[method-assign]
159
+
160
+ # (optional) Override __repr__ to include additional kwargs
161
+ original_repr = cls.__repr__
162
+
163
+ @wraps(original_repr)
164
+ def __repr__(self) -> str:
165
+ # Call the original __repr__ to get the standard fields
166
+ standard_repr = original_repr(self)
167
+
168
+ # Get additional kwargs
169
+ additional_kwargs = [
170
+ # add a '*' in front of additional kwargs to let the user know they are not part of the dataclass
171
+ f"*{k}={v!r}"
172
+ for k, v in self.__dict__.items()
173
+ if k not in cls.__dataclass_fields__ # type: ignore [attr-defined]
174
+ ]
175
+ additional_repr = ", ".join(additional_kwargs)
176
+
177
+ # Combine both representations
178
+ return f"{standard_repr[:-1]}, {additional_repr})" if additional_kwargs else standard_repr
179
+
180
+ cls.__repr__ = __repr__ # type: ignore [method-assign]
181
+
182
+ # List all public methods starting with `validate_` => class validators.
183
+ class_validators = []
184
+
185
+ for name in dir(cls):
186
+ if not name.startswith("validate_"):
187
+ continue
188
+ method = getattr(cls, name)
189
+ if not callable(method):
190
+ continue
191
+ if len(inspect.signature(method).parameters) != 1:
192
+ raise StrictDataclassDefinitionError(
193
+ f"Class '{cls.__name__}' has a class validator '{name}' that takes more than one argument."
194
+ " Class validators must take only 'self' as an argument. Methods starting with 'validate_'"
195
+ " are considered to be class validators."
196
+ )
197
+ class_validators.append(method)
198
+
199
+ cls.__class_validators__ = class_validators # type: ignore [attr-defined]
200
+
201
+ # Add `validate` method to the class, but first check if it already exists
202
+ def validate(self: T) -> None:
203
+ """Run class validators on the instance."""
204
+ for validator in cls.__class_validators__: # type: ignore [attr-defined]
205
+ try:
206
+ validator(self)
207
+ except (ValueError, TypeError) as e:
208
+ raise StrictDataclassClassValidationError(validator=validator.__name__, cause=e) from e
209
+
210
+ # Hack to be able to raise if `.validate()` already exists except if it was created by this decorator on a parent class
211
+ # (in which case we just override it)
212
+ validate.__is_defined_by_strict_decorator__ = True # type: ignore [attr-defined]
213
+
214
+ if hasattr(cls, "validate"):
215
+ if not getattr(cls.validate, "__is_defined_by_strict_decorator__", False): # type: ignore [attr-defined]
216
+ raise StrictDataclassDefinitionError(
217
+ f"Class '{cls.__name__}' already implements a method called 'validate'."
218
+ " This method name is reserved when using the @strict decorator on a dataclass."
219
+ " If you want to keep your own method, please rename it."
220
+ )
221
+
222
+ cls.validate = validate # type: ignore
223
+
224
+ # Run class validators after initialization
225
+ initial_init = cls.__init__
226
+
227
+ @wraps(initial_init)
228
+ def init_with_validate(self, *args, **kwargs) -> None:
229
+ """Run class validators after initialization."""
230
+ initial_init(self, *args, **kwargs) # type: ignore [call-arg]
231
+ cls.validate(self) # type: ignore [attr-defined]
232
+
233
+ setattr(cls, "__init__", init_with_validate)
234
+
235
+ return cls
236
+
237
+ # Return wrapped class or the decorator itself
238
+ return wrap(cls) if cls is not None else wrap
239
+
240
+
241
+ def validated_field(
242
+ validator: Union[List[Validator_T], Validator_T],
243
+ default: Union[Any, _MISSING_TYPE] = MISSING,
244
+ default_factory: Union[Callable[[], Any], _MISSING_TYPE] = MISSING,
245
+ init: bool = True,
246
+ repr: bool = True,
247
+ hash: Optional[bool] = None,
248
+ compare: bool = True,
249
+ metadata: Optional[Dict] = None,
250
+ **kwargs: Any,
251
+ ) -> Any:
252
+ """
253
+ Create a dataclass field with a custom validator.
254
+
255
+ Useful to apply several checks to a field. If only applying one rule, check out the [`as_validated_field`] decorator.
256
+
257
+ Args:
258
+ validator (`Callable` or `List[Callable]`):
259
+ A method that takes a value as input and raises ValueError/TypeError if the value is invalid.
260
+ Can be a list of validators to apply multiple checks.
261
+ **kwargs:
262
+ Additional arguments to pass to `dataclasses.field()`.
263
+
264
+ Returns:
265
+ A field with the validator attached in metadata
266
+ """
267
+ if not isinstance(validator, list):
268
+ validator = [validator]
269
+ if metadata is None:
270
+ metadata = {}
271
+ metadata["validator"] = validator
272
+ return field( # type: ignore
273
+ default=default, # type: ignore [arg-type]
274
+ default_factory=default_factory, # type: ignore [arg-type]
275
+ init=init,
276
+ repr=repr,
277
+ hash=hash,
278
+ compare=compare,
279
+ metadata=metadata,
280
+ **kwargs,
281
+ )
282
+
283
+
284
+ def as_validated_field(validator: Validator_T):
285
+ """
286
+ Decorates a validator function as a [`validated_field`] (i.e. a dataclass field with a custom validator).
287
+
288
+ Args:
289
+ validator (`Callable`):
290
+ A method that takes a value as input and raises ValueError/TypeError if the value is invalid.
291
+ """
292
+
293
+ def _inner(
294
+ default: Union[Any, _MISSING_TYPE] = MISSING,
295
+ default_factory: Union[Callable[[], Any], _MISSING_TYPE] = MISSING,
296
+ init: bool = True,
297
+ repr: bool = True,
298
+ hash: Optional[bool] = None,
299
+ compare: bool = True,
300
+ metadata: Optional[Dict] = None,
301
+ **kwargs: Any,
302
+ ):
303
+ return validated_field(
304
+ validator,
305
+ default=default,
306
+ default_factory=default_factory,
307
+ init=init,
308
+ repr=repr,
309
+ hash=hash,
310
+ compare=compare,
311
+ metadata=metadata,
312
+ **kwargs,
313
+ )
314
+
315
+ return _inner
316
+
317
+
318
+ def type_validator(name: str, value: Any, expected_type: Any) -> None:
+     """Validate that 'value' matches 'expected_type'."""
+     origin = get_origin(expected_type)
+     args = get_args(expected_type)
+
+     if expected_type is Any:
+         return
+     elif validator := _BASIC_TYPE_VALIDATORS.get(origin):
+         validator(name, value, args)
+     elif isinstance(expected_type, type):  # simple types
+         _validate_simple_type(name, value, expected_type)
+     elif isinstance(expected_type, ForwardRef) or isinstance(expected_type, str):
+         return
+     else:
+         raise TypeError(f"Unsupported type for field '{name}': {expected_type}")
+
+
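# A small sketch of how `type_validator` dispatches on typing constructs. The field
# names and values below are examples only; the import path is an assumption.
from typing import Dict, List, Optional

from huggingface_hub.dataclasses import type_validator

type_validator("tags", ["a", "b"], List[str])        # ok: each list item checked against str
type_validator("mapping", {"a": 1}, Dict[str, int])  # ok: keys and values checked separately
type_validator("revision", None, Optional[str])      # ok: matches the `None` arm of the Union
try:
    type_validator("count", "3", int)                # simple isinstance check fails
except TypeError as err:
    print(err)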
+ def _validate_union(name: str, value: Any, args: Tuple[Any, ...]) -> None:
+     """Validate that value matches one of the types in a Union."""
+     errors = []
+     for t in args:
+         try:
+             type_validator(name, value, t)
+             return  # Valid if any type matches
+         except TypeError as e:
+             errors.append(str(e))
+
+     raise TypeError(
+         f"Field '{name}' with value {repr(value)} doesn't match any type in {args}. Errors: {'; '.join(errors)}"
+     )
+
+
+ def _validate_literal(name: str, value: Any, args: Tuple[Any, ...]) -> None:
+     """Validate Literal type."""
+     if value not in args:
+         raise TypeError(f"Field '{name}' expected one of {args}, got {value}")
+
+
+ def _validate_list(name: str, value: Any, args: Tuple[Any, ...]) -> None:
+     """Validate List[T] type."""
+     if not isinstance(value, list):
+         raise TypeError(f"Field '{name}' expected a list, got {type(value).__name__}")
+
+     # Validate each item in the list
+     item_type = args[0]
+     for i, item in enumerate(value):
+         try:
+             type_validator(f"{name}[{i}]", item, item_type)
+         except TypeError as e:
+             raise TypeError(f"Invalid item at index {i} in list '{name}'") from e
+
+
+ def _validate_dict(name: str, value: Any, args: Tuple[Any, ...]) -> None:
+     """Validate Dict[K, V] type."""
+     if not isinstance(value, dict):
+         raise TypeError(f"Field '{name}' expected a dict, got {type(value).__name__}")
+
+     # Validate keys and values
+     key_type, value_type = args
+     for k, v in value.items():
+         try:
+             type_validator(f"{name}.key", k, key_type)
+             type_validator(f"{name}[{k!r}]", v, value_type)
+         except TypeError as e:
+             raise TypeError(f"Invalid key or value in dict '{name}'") from e
+
+
+ def _validate_tuple(name: str, value: Any, args: Tuple[Any, ...]) -> None:
+     """Validate Tuple type."""
+     if not isinstance(value, tuple):
+         raise TypeError(f"Field '{name}' expected a tuple, got {type(value).__name__}")
+
+     # Handle variable-length tuples: Tuple[T, ...]
+     if len(args) == 2 and args[1] is Ellipsis:
+         for i, item in enumerate(value):
+             try:
+                 type_validator(f"{name}[{i}]", item, args[0])
+             except TypeError as e:
+                 raise TypeError(f"Invalid item at index {i} in tuple '{name}'") from e
+     # Handle fixed-length tuples: Tuple[T1, T2, ...]
+     elif len(args) != len(value):
+         raise TypeError(f"Field '{name}' expected a tuple of length {len(args)}, got {len(value)}")
+     else:
+         for i, (item, expected) in enumerate(zip(value, args)):
+             try:
+                 type_validator(f"{name}[{i}]", item, expected)
+             except TypeError as e:
+                 raise TypeError(f"Invalid item at index {i} in tuple '{name}'") from e
+
+
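# Quick sketch of the two tuple shapes handled above (example values only; import
# path assumed): `Tuple[int, ...]` is variadic and homogeneous, while `Tuple[str, int]`
# has a fixed length with a type per position.
from typing import Tuple

from huggingface_hub.dataclasses import type_validator

type_validator("shape", (1, 2, 3), Tuple[int, ...])    # ok: every item validated against int
type_validator("pair", ("bert", 12), Tuple[str, int])  # ok: length and per-position types match
try:
    type_validator("pair", ("bert",), Tuple[str, int])  # wrong length for a fixed-size tuple
except TypeError as err:
    print(err)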
+ def _validate_set(name: str, value: Any, args: Tuple[Any, ...]) -> None:
+     """Validate Set[T] type."""
+     if not isinstance(value, set):
+         raise TypeError(f"Field '{name}' expected a set, got {type(value).__name__}")
+
+     # Validate each item in the set
+     item_type = args[0]
+     for i, item in enumerate(value):
+         try:
+             type_validator(f"{name} item", item, item_type)
+         except TypeError as e:
+             raise TypeError(f"Invalid item in set '{name}'") from e
+
+
+ def _validate_simple_type(name: str, value: Any, expected_type: type) -> None:
+     """Validate simple type (int, str, etc.)."""
+     if not isinstance(value, expected_type):
+         raise TypeError(
+             f"Field '{name}' expected {expected_type.__name__}, got {type(value).__name__} (value: {repr(value)})"
+         )
+
+
+ def _create_type_validator(field: Field) -> Validator_T:
+     """Create a type validator function for a field."""
+     # Hacky: we cannot use a lambda here because of reference issues
+
+     def validator(value: Any) -> None:
+         type_validator(field.name, value, field.type)
+
+     return validator
+
+
+ def _is_validator(validator: Any) -> bool:
+     """Check if a function is a validator.
+
+     A validator is a Callable that can be called with a single positional argument.
+     The validator can have more arguments with default values.
+
+     Basically, returns True if `validator(value)` is possible.
+     """
+     if not callable(validator):
+         return False
+
+     signature = inspect.signature(validator)
+     parameters = list(signature.parameters.values())
+     if len(parameters) == 0:
+         return False
+     if parameters[0].kind not in (
+         inspect.Parameter.POSITIONAL_OR_KEYWORD,
+         inspect.Parameter.POSITIONAL_ONLY,
+         inspect.Parameter.VAR_POSITIONAL,
+     ):
+         return False
+     for parameter in parameters[1:]:
+         if parameter.default == inspect.Parameter.empty:
+             return False
+     return True
+
+
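# Sketch of what `_is_validator` accepts: any callable that can be invoked as
# `validator(value)`, i.e. one leading positional parameter and only defaulted
# parameters after it. The sample callables are assumptions for illustration.
from huggingface_hub.dataclasses import _is_validator


def check(value, strict_mode=True) -> None:  # extra parameter has a default -> accepted
    pass


def needs_two(value, other) -> None:  # second parameter has no default -> rejected
    pass


assert _is_validator(check) is True
assert _is_validator(lambda value: None) is True
assert _is_validator(needs_two) is False
assert _is_validator("not a callable") is False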
+ _BASIC_TYPE_VALIDATORS = {
+     Union: _validate_union,
+     Literal: _validate_literal,
+     list: _validate_list,
+     dict: _validate_dict,
+     tuple: _validate_tuple,
+     set: _validate_set,
+ }
+
+
+ __all__ = [
+     "strict",
+     "validated_field",
+     "Validator_T",
+     "StrictDataclassClassValidationError",
+     "StrictDataclassDefinitionError",
+     "StrictDataclassFieldValidationError",
+ ]