patrickramos committed
Commit fedd961 · verified · 1 Parent(s): 0944637

Add files using upload-large-folder tool

Files changed (50)
  1. .venv/lib/python3.13/site-packages/fsspec/__init__.py +71 -0
  2. .venv/lib/python3.13/site-packages/fsspec/_version.py +21 -0
  3. .venv/lib/python3.13/site-packages/fsspec/archive.py +75 -0
  4. .venv/lib/python3.13/site-packages/fsspec/asyn.py +1097 -0
  5. .venv/lib/python3.13/site-packages/fsspec/callbacks.py +324 -0
  6. .venv/lib/python3.13/site-packages/fsspec/compression.py +182 -0
  7. .venv/lib/python3.13/site-packages/fsspec/config.py +131 -0
  8. .venv/lib/python3.13/site-packages/fsspec/conftest.py +55 -0
  9. .venv/lib/python3.13/site-packages/fsspec/core.py +743 -0
  10. .venv/lib/python3.13/site-packages/fsspec/dircache.py +98 -0
  11. .venv/lib/python3.13/site-packages/fsspec/fuse.py +324 -0
  12. .venv/lib/python3.13/site-packages/fsspec/generic.py +394 -0
  13. .venv/lib/python3.13/site-packages/fsspec/gui.py +417 -0
  14. .venv/lib/python3.13/site-packages/fsspec/mapping.py +251 -0
  15. .venv/lib/python3.13/site-packages/fsspec/parquet.py +541 -0
  16. .venv/lib/python3.13/site-packages/fsspec/registry.py +330 -0
  17. .venv/lib/python3.13/site-packages/fsspec/spec.py +2270 -0
  18. .venv/lib/python3.13/site-packages/fsspec/transaction.py +90 -0
  19. .venv/lib/python3.13/site-packages/hf_xet-1.1.5.dist-info/INSTALLER +1 -0
  20. .venv/lib/python3.13/site-packages/hf_xet-1.1.5.dist-info/METADATA +23 -0
  21. .venv/lib/python3.13/site-packages/hf_xet-1.1.5.dist-info/RECORD +8 -0
  22. .venv/lib/python3.13/site-packages/hf_xet-1.1.5.dist-info/REQUESTED +0 -0
  23. .venv/lib/python3.13/site-packages/hf_xet-1.1.5.dist-info/WHEEL +4 -0
  24. .venv/lib/python3.13/site-packages/hf_xet/__init__.py +5 -0
  25. .venv/lib/python3.13/site-packages/huggingface_hub/__init__.py +1484 -0
  26. .venv/lib/python3.13/site-packages/huggingface_hub/_commit_api.py +915 -0
  27. .venv/lib/python3.13/site-packages/huggingface_hub/_commit_scheduler.py +353 -0
  28. .venv/lib/python3.13/site-packages/huggingface_hub/_inference_endpoints.py +413 -0
  29. .venv/lib/python3.13/site-packages/huggingface_hub/_local_folder.py +441 -0
  30. .venv/lib/python3.13/site-packages/huggingface_hub/_login.py +520 -0
  31. .venv/lib/python3.13/site-packages/huggingface_hub/_oauth.py +464 -0
  32. .venv/lib/python3.13/site-packages/huggingface_hub/_snapshot_download.py +338 -0
  33. .venv/lib/python3.13/site-packages/huggingface_hub/_tensorboard_logger.py +194 -0
  34. .venv/lib/python3.13/site-packages/huggingface_hub/_upload_large_folder.py +625 -0
  35. .venv/lib/python3.13/site-packages/huggingface_hub/dataclasses.py +481 -0
  36. .venv/lib/python3.13/site-packages/huggingface_hub/errors.py +377 -0
  37. .venv/lib/python3.13/site-packages/huggingface_hub/fastai_utils.py +425 -0
  38. .venv/lib/python3.13/site-packages/huggingface_hub/hf_api.py +0 -0
  39. .venv/lib/python3.13/site-packages/huggingface_hub/hf_file_system.py +1142 -0
  40. .venv/lib/python3.13/site-packages/huggingface_hub/inference_api.py +217 -0
  41. .venv/lib/python3.13/site-packages/huggingface_hub/py.typed +0 -0
  42. .venv/lib/python3.13/site-packages/huggingface_hub/repocard.py +830 -0
  43. .venv/lib/python3.13/site-packages/huggingface_hub/repository.py +1477 -0
  44. .venv/lib/python3.13/site-packages/inquirerpy-0.3.4.dist-info/INSTALLER +1 -0
  45. .venv/lib/python3.13/site-packages/inquirerpy-0.3.4.dist-info/LICENSE +21 -0
  46. .venv/lib/python3.13/site-packages/inquirerpy-0.3.4.dist-info/METADATA +191 -0
  47. .venv/lib/python3.13/site-packages/inquirerpy-0.3.4.dist-info/RECORD +36 -0
  48. .venv/lib/python3.13/site-packages/inquirerpy-0.3.4.dist-info/REQUESTED +0 -0
  49. .venv/lib/python3.13/site-packages/inquirerpy-0.3.4.dist-info/WHEEL +4 -0
  50. 2022/.DS_Store +0 -0
.venv/lib/python3.13/site-packages/fsspec/__init__.py ADDED
@@ -0,0 +1,71 @@
+ from . import caching
+ from ._version import __version__  # noqa: F401
+ from .callbacks import Callback
+ from .compression import available_compressions
+ from .core import get_fs_token_paths, open, open_files, open_local, url_to_fs
+ from .exceptions import FSTimeoutError
+ from .mapping import FSMap, get_mapper
+ from .registry import (
+     available_protocols,
+     filesystem,
+     get_filesystem_class,
+     register_implementation,
+     registry,
+ )
+ from .spec import AbstractFileSystem
+
+ __all__ = [
+     "AbstractFileSystem",
+     "FSTimeoutError",
+     "FSMap",
+     "filesystem",
+     "register_implementation",
+     "get_filesystem_class",
+     "get_fs_token_paths",
+     "get_mapper",
+     "open",
+     "open_files",
+     "open_local",
+     "registry",
+     "caching",
+     "Callback",
+     "available_protocols",
+     "available_compressions",
+     "url_to_fs",
+ ]
+
+
+ def process_entries():
+     try:
+         from importlib.metadata import entry_points
+     except ImportError:
+         return
+     if entry_points is not None:
+         try:
+             eps = entry_points()
+         except TypeError:
+             pass  # importlib-metadata < 0.8
+         else:
+             if hasattr(eps, "select"):  # Python 3.10+ / importlib_metadata >= 3.9.0
+                 specs = eps.select(group="fsspec.specs")
+             else:
+                 specs = eps.get("fsspec.specs", [])
+             registered_names = {}
+             for spec in specs:
+                 err_msg = f"Unable to load filesystem from {spec}"
+                 name = spec.name
+                 if name in registered_names:
+                     continue
+                 registered_names[name] = True
+                 register_implementation(
+                     name,
+                     spec.value.replace(":", "."),
+                     errtxt=err_msg,
+                     # We take our implementations as the ones to overload with if
+                     # for some reason we encounter some, may be the same, already
+                     # registered
+                     clobber=True,
+                 )
+
+
+ process_entries()
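
Note: the entry-point loop above mirrors fsspec's public registration API. As a minimal sketch of how a third-party backend would be registered and resolved by hand (the MyFS class and "myproto" protocol name are hypothetical, not part of this commit):

    import fsspec
    from fsspec.spec import AbstractFileSystem

    class MyFS(AbstractFileSystem):
        # hypothetical minimal backend; a real one overrides ls/info/_open etc.
        protocol = "myproto"

    # equivalent to what process_entries() does for each "fsspec.specs" entry point
    fsspec.register_implementation("myproto", MyFS, clobber=True)
    fs = fsspec.filesystem("myproto")  # resolved through the registry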
.venv/lib/python3.13/site-packages/fsspec/_version.py ADDED
@@ -0,0 +1,21 @@
+ # file generated by setuptools-scm
+ # don't change, don't track in version control
+
+ __all__ = ["__version__", "__version_tuple__", "version", "version_tuple"]
+
+ TYPE_CHECKING = False
+ if TYPE_CHECKING:
+     from typing import Tuple
+     from typing import Union
+
+     VERSION_TUPLE = Tuple[Union[int, str], ...]
+ else:
+     VERSION_TUPLE = object
+
+ version: str
+ __version__: str
+ __version_tuple__: VERSION_TUPLE
+ version_tuple: VERSION_TUPLE
+
+ __version__ = version = '2025.7.0'
+ __version_tuple__ = version_tuple = (2025, 7, 0)
.venv/lib/python3.13/site-packages/fsspec/archive.py ADDED
@@ -0,0 +1,75 @@
+ import operator
+
+ from fsspec import AbstractFileSystem
+ from fsspec.utils import tokenize
+
+
+ class AbstractArchiveFileSystem(AbstractFileSystem):
+     """
+     A generic superclass for implementing Archive-based filesystems.
+
+     Currently, it is shared amongst
+     :class:`~fsspec.implementations.zip.ZipFileSystem`,
+     :class:`~fsspec.implementations.libarchive.LibArchiveFileSystem` and
+     :class:`~fsspec.implementations.tar.TarFileSystem`.
+     """
+
+     def __str__(self):
+         return f"<Archive-like object {type(self).__name__} at {id(self)}>"
+
+     __repr__ = __str__
+
+     def ukey(self, path):
+         return tokenize(path, self.fo, self.protocol)
+
+     def _all_dirnames(self, paths):
+         """Returns *all* directory names for each path in paths, including
+         intermediate ones.
+
+         Parameters
+         ----------
+         paths: Iterable of path strings
+         """
+         if len(paths) == 0:
+             return set()
+
+         dirnames = {self._parent(path) for path in paths} - {self.root_marker}
+         return dirnames | self._all_dirnames(dirnames)
+
+     def info(self, path, **kwargs):
+         self._get_dirs()
+         path = self._strip_protocol(path)
+         if path in {"", "/"} and self.dir_cache:
+             return {"name": "", "type": "directory", "size": 0}
+         if path in self.dir_cache:
+             return self.dir_cache[path]
+         elif path + "/" in self.dir_cache:
+             return self.dir_cache[path + "/"]
+         else:
+             raise FileNotFoundError(path)
+
+     def ls(self, path, detail=True, **kwargs):
+         self._get_dirs()
+         paths = {}
+         for p, f in self.dir_cache.items():
+             p = p.rstrip("/")
+             if "/" in p:
+                 root = p.rsplit("/", 1)[0]
+             else:
+                 root = ""
+             if root == path.rstrip("/"):
+                 paths[p] = f
+             elif all(
+                 (a == b)
+                 for a, b in zip(path.split("/"), [""] + p.strip("/").split("/"))
+             ):
+                 # root directory entry
+                 ppath = p.rstrip("/").split("/", 1)[0]
+                 if ppath not in paths:
+                     out = {"name": ppath, "size": 0, "type": "directory"}
+                     paths[ppath] = out
+         if detail:
+             out = sorted(paths.values(), key=operator.itemgetter("name"))
+             return out
+         else:
+             return sorted(paths)
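
Note: a standalone sketch of the _all_dirnames recursion above, which collects every intermediate directory for a set of archive member paths (here posixpath.dirname stands in for self._parent and "" for root_marker; both substitutions are assumptions for illustration):

    import posixpath

    def all_dirnames(paths):
        # mirrors AbstractArchiveFileSystem._all_dirnames
        if not paths:
            return set()
        dirnames = {posixpath.dirname(p) for p in paths} - {""}
        return dirnames | all_dirnames(dirnames)

    assert all_dirnames({"a/b/c.txt"}) == {"a/b", "a"}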
.venv/lib/python3.13/site-packages/fsspec/asyn.py ADDED
@@ -0,0 +1,1097 @@
+ import asyncio
+ import asyncio.events
+ import functools
+ import inspect
+ import io
+ import numbers
+ import os
+ import re
+ import threading
+ from collections.abc import Iterable
+ from glob import has_magic
+ from typing import TYPE_CHECKING
+
+ from .callbacks import DEFAULT_CALLBACK
+ from .exceptions import FSTimeoutError
+ from .implementations.local import LocalFileSystem, make_path_posix, trailing_sep
+ from .spec import AbstractBufferedFile, AbstractFileSystem
+ from .utils import glob_translate, is_exception, other_paths
+
+ private = re.compile("_[^_]")
+ iothread = [None]  # dedicated fsspec IO thread
+ loop = [None]  # global event loop for any non-async instance
+ _lock = None  # global lock placeholder
+ get_running_loop = asyncio.get_running_loop
+
+
+ def get_lock():
+     """Allocate or return a threading lock.
+
+     The lock is allocated on first use to allow setting one lock per forked process.
+     """
+     global _lock
+     if not _lock:
+         _lock = threading.Lock()
+     return _lock
+
+
+ def reset_lock():
+     """Reset the global lock.
+
+     This should be called only on the init of a forked process to reset the lock to
+     None, enabling the new forked process to get a new lock.
+     """
+     global _lock
+
+     iothread[0] = None
+     loop[0] = None
+     _lock = None
+
+
+ async def _runner(event, coro, result, timeout=None):
+     timeout = timeout if timeout else None  # convert 0 or 0.0 to None
+     if timeout is not None:
+         coro = asyncio.wait_for(coro, timeout=timeout)
+     try:
+         result[0] = await coro
+     except Exception as ex:
+         result[0] = ex
+     finally:
+         event.set()
+
+
+ def sync(loop, func, *args, timeout=None, **kwargs):
+     """
+     Make loop run coroutine until it returns. Runs in other thread
+
+     Examples
+     --------
+     >>> fsspec.asyn.sync(fsspec.asyn.get_loop(), func, *args,
+                          timeout=timeout, **kwargs)
+     """
+     timeout = timeout if timeout else None  # convert 0 or 0.0 to None
+     # NB: if the loop is not running *yet*, it is OK to submit work
+     # and we will wait for it
+     if loop is None or loop.is_closed():
+         raise RuntimeError("Loop is not running")
+     try:
+         loop0 = asyncio.events.get_running_loop()
+         if loop0 is loop:
+             raise NotImplementedError("Calling sync() from within a running loop")
+     except NotImplementedError:
+         raise
+     except RuntimeError:
+         pass
+     coro = func(*args, **kwargs)
+     result = [None]
+     event = threading.Event()
+     asyncio.run_coroutine_threadsafe(_runner(event, coro, result, timeout), loop)
+     while True:
+         # this loop allows the thread to get interrupted
+         if event.wait(1):
+             break
+         if timeout is not None:
+             timeout -= 1
+             if timeout < 0:
+                 raise FSTimeoutError
+
+     return_result = result[0]
+     if isinstance(return_result, asyncio.TimeoutError):
+         # suppress asyncio.TimeoutError, raise FSTimeoutError
+         raise FSTimeoutError from return_result
+     elif isinstance(return_result, BaseException):
+         raise return_result
+     else:
+         return return_result
+
+
+ def sync_wrapper(func, obj=None):
+     """Given a function, make it so it can be called in blocking contexts
+
+     Leave obj=None if defining within a class. Pass the instance if attaching
+     as an attribute of the instance.
+     """
+
+     @functools.wraps(func)
+     def wrapper(*args, **kwargs):
+         self = obj or args[0]
+         return sync(self.loop, func, *args, **kwargs)
+
+     return wrapper
+
+
+ def get_loop():
+     """Create or return the default fsspec IO loop
+
+     The loop will be running on a separate thread.
+     """
+     if loop[0] is None:
+         with get_lock():
+             # repeat the check just in case the loop got filled between the
+             # previous two calls from another thread
+             if loop[0] is None:
+                 loop[0] = asyncio.new_event_loop()
+                 th = threading.Thread(target=loop[0].run_forever, name="fsspecIO")
+                 th.daemon = True
+                 th.start()
+                 iothread[0] = th
+     return loop[0]
+
+
+ def reset_after_fork():
+     global lock
+     loop[0] = None
+     iothread[0] = None
+     lock = None
+
+
+ if hasattr(os, "register_at_fork"):
+     # should be posix; this will do nothing for spawn or forkserver subprocesses
+     os.register_at_fork(after_in_child=reset_after_fork)
+
+
+ if TYPE_CHECKING:
+     import resource
+
+     ResourceError = resource.error
+ else:
+     try:
+         import resource
+     except ImportError:
+         resource = None
+         ResourceError = OSError
+     else:
+         ResourceError = getattr(resource, "error", OSError)
+
+ _DEFAULT_BATCH_SIZE = 128
+ _NOFILES_DEFAULT_BATCH_SIZE = 1280
+
+
+ def _get_batch_size(nofiles=False):
+     from fsspec.config import conf
+
+     if nofiles:
+         if "nofiles_gather_batch_size" in conf:
+             return conf["nofiles_gather_batch_size"]
+     else:
+         if "gather_batch_size" in conf:
+             return conf["gather_batch_size"]
+     if nofiles:
+         return _NOFILES_DEFAULT_BATCH_SIZE
+     if resource is None:
+         return _DEFAULT_BATCH_SIZE
+
+     try:
+         soft_limit, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
+     except (ImportError, ValueError, ResourceError):
+         return _DEFAULT_BATCH_SIZE
+
+     if soft_limit == resource.RLIM_INFINITY:
+         return -1
+     else:
+         return soft_limit // 8
+
+
+ def running_async() -> bool:
+     """Being executed by an event loop?"""
+     try:
+         asyncio.get_running_loop()
+         return True
+     except RuntimeError:
+         return False
+
+
+ async def _run_coros_in_chunks(
+     coros,
+     batch_size=None,
+     callback=DEFAULT_CALLBACK,
+     timeout=None,
+     return_exceptions=False,
+     nofiles=False,
+ ):
+     """Run the given coroutines in chunks.
+
+     Parameters
+     ----------
+     coros: list of coroutines to run
+     batch_size: int or None
+         Number of coroutines to submit/wait on simultaneously.
+         If -1, there will not be any throttling. If
+         None, it will be inferred from _get_batch_size()
+     callback: fsspec.callbacks.Callback instance
+         Gets a relative_update when each coroutine completes
+     timeout: number or None
+         If given, each coroutine times out after this time. Note that, since
+         there are multiple batches, the total run time of this function will in
+         general be longer
+     return_exceptions: bool
+         Same meaning as in asyncio.gather
+     nofiles: bool
+         If inferring the batch_size, does this operation involve local files?
+         If yes, you normally expect smaller batches.
+     """
+
+     if batch_size is None:
+         batch_size = _get_batch_size(nofiles=nofiles)
+
+     if batch_size == -1:
+         batch_size = len(coros)
+
+     assert batch_size > 0
+
+     async def _run_coro(coro, i):
+         try:
+             return await asyncio.wait_for(coro, timeout=timeout), i
+         except Exception as e:
+             if not return_exceptions:
+                 raise
+             return e, i
+         finally:
+             callback.relative_update(1)
+
+     i = 0
+     n = len(coros)
+     results = [None] * n
+     pending = set()
+
+     while pending or i < n:
+         while len(pending) < batch_size and i < n:
+             pending.add(asyncio.ensure_future(_run_coro(coros[i], i)))
+             i += 1
+
+         if not pending:
+             break
+
+         done, pending = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED)
+         while done:
+             result, k = await done.pop()
+             results[k] = result
+
+     return results
+
+
+ # these methods should be implemented as async by any async-able backend
+ async_methods = [
+     "_ls",
+     "_cat_file",
+     "_get_file",
+     "_put_file",
+     "_rm_file",
+     "_cp_file",
+     "_pipe_file",
+     "_expand_path",
+     "_info",
+     "_isfile",
+     "_isdir",
+     "_exists",
+     "_walk",
+     "_glob",
+     "_find",
+     "_du",
+     "_size",
+     "_mkdir",
+     "_makedirs",
+ ]
+
+
+ class AsyncFileSystem(AbstractFileSystem):
+     """Async file operations, default implementations
+
+     Passes bulk operations to asyncio.gather for concurrent operation.
+
+     Implementations that have concurrent batch operations and/or async methods
+     should inherit from this class instead of AbstractFileSystem. Docstrings are
+     copied from the un-underscored method in AbstractFileSystem, if not given.
+     """
+
+     # note that methods do not have docstring here; they will be copied
+     # for _* methods and inferred for overridden methods.
+
+     async_impl = True
+     mirror_sync_methods = True
+     disable_throttling = False
+
+     def __init__(self, *args, asynchronous=False, loop=None, batch_size=None, **kwargs):
+         self.asynchronous = asynchronous
+         self._pid = os.getpid()
+         if not asynchronous:
+             self._loop = loop or get_loop()
+         else:
+             self._loop = None
+         self.batch_size = batch_size
+         super().__init__(*args, **kwargs)
+
+     @property
+     def loop(self):
+         if self._pid != os.getpid():
+             raise RuntimeError("This class is not fork-safe")
+         return self._loop
+
+     async def _rm_file(self, path, **kwargs):
+         raise NotImplementedError
+
+     async def _rm(self, path, recursive=False, batch_size=None, **kwargs):
+         # TODO: implement on_error
+         batch_size = batch_size or self.batch_size
+         path = await self._expand_path(path, recursive=recursive)
+         return await _run_coros_in_chunks(
+             [self._rm_file(p, **kwargs) for p in reversed(path)],
+             batch_size=batch_size,
+             nofiles=True,
+         )
+
+     async def _cp_file(self, path1, path2, **kwargs):
+         raise NotImplementedError
+
+     async def _mv_file(self, path1, path2):
+         await self._cp_file(path1, path2)
+         await self._rm_file(path1)
+
+     async def _copy(
+         self,
+         path1,
+         path2,
+         recursive=False,
+         on_error=None,
+         maxdepth=None,
+         batch_size=None,
+         **kwargs,
+     ):
+         if on_error is None and recursive:
+             on_error = "ignore"
+         elif on_error is None:
+             on_error = "raise"
+
+         if isinstance(path1, list) and isinstance(path2, list):
+             # No need to expand paths when both source and destination
+             # are provided as lists
+             paths1 = path1
+             paths2 = path2
+         else:
+             source_is_str = isinstance(path1, str)
+             paths1 = await self._expand_path(
+                 path1, maxdepth=maxdepth, recursive=recursive
+             )
+             if source_is_str and (not recursive or maxdepth is not None):
+                 # Non-recursive glob does not copy directories
+                 paths1 = [
+                     p for p in paths1 if not (trailing_sep(p) or await self._isdir(p))
+                 ]
+                 if not paths1:
+                     return
+
+             source_is_file = len(paths1) == 1
+             dest_is_dir = isinstance(path2, str) and (
+                 trailing_sep(path2) or await self._isdir(path2)
+             )
+
+             exists = source_is_str and (
+                 (has_magic(path1) and source_is_file)
+                 or (not has_magic(path1) and dest_is_dir and not trailing_sep(path1))
+             )
+             paths2 = other_paths(
+                 paths1,
+                 path2,
+                 exists=exists,
+                 flatten=not source_is_str,
+             )
+
+         batch_size = batch_size or self.batch_size
+         coros = [self._cp_file(p1, p2, **kwargs) for p1, p2 in zip(paths1, paths2)]
+         result = await _run_coros_in_chunks(
+             coros, batch_size=batch_size, return_exceptions=True, nofiles=True
+         )
+
+         for ex in filter(is_exception, result):
+             if on_error == "ignore" and isinstance(ex, FileNotFoundError):
+                 continue
+             raise ex
+
+     async def _pipe_file(self, path, value, mode="overwrite", **kwargs):
+         raise NotImplementedError
+
+     async def _pipe(self, path, value=None, batch_size=None, **kwargs):
+         if isinstance(path, str):
+             path = {path: value}
+         batch_size = batch_size or self.batch_size
+         return await _run_coros_in_chunks(
+             [self._pipe_file(k, v, **kwargs) for k, v in path.items()],
+             batch_size=batch_size,
+             nofiles=True,
+         )
+
+     async def _process_limits(self, url, start, end):
+         """Helper for "Range"-based _cat_file"""
+         size = None
+         suff = False
+         if start is not None and start < 0:
+             # if start is negative and end None, end is the "suffix length"
+             if end is None:
+                 end = -start
+                 start = ""
+                 suff = True
+             else:
+                 size = size or (await self._info(url))["size"]
+                 start = size + start
+         elif start is None:
+             start = 0
+         if not suff:
+             if end is not None and end < 0:
+                 if start is not None:
+                     size = size or (await self._info(url))["size"]
+                     end = size + end
+             elif end is None:
+                 end = ""
+             if isinstance(end, numbers.Integral):
+                 end -= 1  # bytes range is inclusive
+         return f"bytes={start}-{end}"
+
+     async def _cat_file(self, path, start=None, end=None, **kwargs):
+         raise NotImplementedError
+
+     async def _cat(
+         self, path, recursive=False, on_error="raise", batch_size=None, **kwargs
+     ):
+         paths = await self._expand_path(path, recursive=recursive)
+         coros = [self._cat_file(path, **kwargs) for path in paths]
+         batch_size = batch_size or self.batch_size
+         out = await _run_coros_in_chunks(
+             coros, batch_size=batch_size, nofiles=True, return_exceptions=True
+         )
+         if on_error == "raise":
+             ex = next(filter(is_exception, out), False)
+             if ex:
+                 raise ex
+         if (
+             len(paths) > 1
+             or isinstance(path, list)
+             or paths[0] != self._strip_protocol(path)
+         ):
+             return {
+                 k: v
+                 for k, v in zip(paths, out)
+                 if on_error != "omit" or not is_exception(v)
+             }
+         else:
+             return out[0]
+
+     async def _cat_ranges(
+         self,
+         paths,
+         starts,
+         ends,
+         max_gap=None,
+         batch_size=None,
+         on_error="return",
+         **kwargs,
+     ):
+         """Get the contents of byte ranges from one or more files
+
+         Parameters
+         ----------
+         paths: list
+             A list of filepaths on this filesystem
+         starts, ends: int or list
+             Bytes limits of the read. If using a single int, the same value will be
+             used to read all the specified files.
+         """
+         # TODO: on_error
+         if max_gap is not None:
+             # use utils.merge_offset_ranges
+             raise NotImplementedError
+         if not isinstance(paths, list):
+             raise TypeError
+         if not isinstance(starts, Iterable):
+             starts = [starts] * len(paths)
+         if not isinstance(ends, Iterable):
+             ends = [ends] * len(paths)
+         if len(starts) != len(paths) or len(ends) != len(paths):
+             raise ValueError
+         coros = [
+             self._cat_file(p, start=s, end=e, **kwargs)
+             for p, s, e in zip(paths, starts, ends)
+         ]
+         batch_size = batch_size or self.batch_size
+         return await _run_coros_in_chunks(
+             coros, batch_size=batch_size, nofiles=True, return_exceptions=True
+         )
+
+     async def _put_file(self, lpath, rpath, mode="overwrite", **kwargs):
+         raise NotImplementedError
+
+     async def _put(
+         self,
+         lpath,
+         rpath,
+         recursive=False,
+         callback=DEFAULT_CALLBACK,
+         batch_size=None,
+         maxdepth=None,
+         **kwargs,
+     ):
+         """Copy file(s) from local.
+
+         Copies a specific file or tree of files (if recursive=True). If rpath
+         ends with a "/", it will be assumed to be a directory, and target files
+         will go within.
+
+         The put_file method will be called concurrently on a batch of files. The
+         batch_size option can configure the number of futures that can be executed
+         at the same time. If it is -1, then all the files will be uploaded concurrently.
+         The default can be set for this instance by passing "batch_size" in the
+         constructor, or for all instances by setting the "gather_batch_size" key
+         in ``fsspec.config.conf``, falling back to 1/8th of the system limit.
+         """
+         if isinstance(lpath, list) and isinstance(rpath, list):
+             # No need to expand paths when both source and destination
+             # are provided as lists
+             rpaths = rpath
+             lpaths = lpath
+         else:
+             source_is_str = isinstance(lpath, str)
+             if source_is_str:
+                 lpath = make_path_posix(lpath)
+             fs = LocalFileSystem()
+             lpaths = fs.expand_path(lpath, recursive=recursive, maxdepth=maxdepth)
+             if source_is_str and (not recursive or maxdepth is not None):
+                 # Non-recursive glob does not copy directories
+                 lpaths = [p for p in lpaths if not (trailing_sep(p) or fs.isdir(p))]
+                 if not lpaths:
+                     return
+
+             source_is_file = len(lpaths) == 1
+             dest_is_dir = isinstance(rpath, str) and (
+                 trailing_sep(rpath) or await self._isdir(rpath)
+             )
+
+             rpath = self._strip_protocol(rpath)
+             exists = source_is_str and (
+                 (has_magic(lpath) and source_is_file)
+                 or (not has_magic(lpath) and dest_is_dir and not trailing_sep(lpath))
+             )
+             rpaths = other_paths(
+                 lpaths,
+                 rpath,
+                 exists=exists,
+                 flatten=not source_is_str,
+             )
+
+         is_dir = {l: os.path.isdir(l) for l in lpaths}
+         rdirs = [r for l, r in zip(lpaths, rpaths) if is_dir[l]]
+         file_pairs = [(l, r) for l, r in zip(lpaths, rpaths) if not is_dir[l]]
+
+         await asyncio.gather(*[self._makedirs(d, exist_ok=True) for d in rdirs])
+         batch_size = batch_size or self.batch_size
+
+         coros = []
+         callback.set_size(len(file_pairs))
+         for lfile, rfile in file_pairs:
+             put_file = callback.branch_coro(self._put_file)
+             coros.append(put_file(lfile, rfile, **kwargs))
+
+         return await _run_coros_in_chunks(
+             coros, batch_size=batch_size, callback=callback
+         )
+
+     async def _get_file(self, rpath, lpath, **kwargs):
+         raise NotImplementedError
+
+     async def _get(
+         self,
+         rpath,
+         lpath,
+         recursive=False,
+         callback=DEFAULT_CALLBACK,
+         maxdepth=None,
+         **kwargs,
+     ):
+         """Copy file(s) to local.
+
+         Copies a specific file or tree of files (if recursive=True). If lpath
+         ends with a "/", it will be assumed to be a directory, and target files
+         will go within. Can submit a list of paths, which may be glob-patterns
+         and will be expanded.
+
+         The get_file method will be called concurrently on a batch of files. The
+         batch_size option can configure the number of futures that can be executed
+         at the same time. If it is -1, then all the files will be downloaded concurrently.
+         The default can be set for this instance by passing "batch_size" in the
+         constructor, or for all instances by setting the "gather_batch_size" key
+         in ``fsspec.config.conf``, falling back to 1/8th of the system limit.
+         """
+         if isinstance(lpath, list) and isinstance(rpath, list):
+             # No need to expand paths when both source and destination
+             # are provided as lists
+             rpaths = rpath
+             lpaths = lpath
+         else:
+             source_is_str = isinstance(rpath, str)
+             # First check for rpath trailing slash as _strip_protocol removes it.
+             source_not_trailing_sep = source_is_str and not trailing_sep(rpath)
+             rpath = self._strip_protocol(rpath)
+             rpaths = await self._expand_path(
+                 rpath, recursive=recursive, maxdepth=maxdepth
+             )
+             if source_is_str and (not recursive or maxdepth is not None):
+                 # Non-recursive glob does not copy directories
+                 rpaths = [
+                     p for p in rpaths if not (trailing_sep(p) or await self._isdir(p))
+                 ]
+                 if not rpaths:
+                     return
+
+             lpath = make_path_posix(lpath)
+             source_is_file = len(rpaths) == 1
+             dest_is_dir = isinstance(lpath, str) and (
+                 trailing_sep(lpath) or LocalFileSystem().isdir(lpath)
+             )
+
+             exists = source_is_str and (
+                 (has_magic(rpath) and source_is_file)
+                 or (not has_magic(rpath) and dest_is_dir and source_not_trailing_sep)
+             )
+             lpaths = other_paths(
+                 rpaths,
+                 lpath,
+                 exists=exists,
+                 flatten=not source_is_str,
+             )
+
+         [os.makedirs(os.path.dirname(lp), exist_ok=True) for lp in lpaths]
+         batch_size = kwargs.pop("batch_size", self.batch_size)
+
+         coros = []
+         callback.set_size(len(lpaths))
+         for lpath, rpath in zip(lpaths, rpaths):
+             get_file = callback.branch_coro(self._get_file)
+             coros.append(get_file(rpath, lpath, **kwargs))
+         return await _run_coros_in_chunks(
+             coros, batch_size=batch_size, callback=callback
+         )
+
+     async def _isfile(self, path):
+         try:
+             return (await self._info(path))["type"] == "file"
+         except:  # noqa: E722
+             return False
+
+     async def _isdir(self, path):
+         try:
+             return (await self._info(path))["type"] == "directory"
+         except OSError:
+             return False
+
+     async def _size(self, path):
+         return (await self._info(path)).get("size", None)
+
+     async def _sizes(self, paths, batch_size=None):
+         batch_size = batch_size or self.batch_size
+         return await _run_coros_in_chunks(
+             [self._size(p) for p in paths], batch_size=batch_size
+         )
+
+     async def _exists(self, path, **kwargs):
+         try:
+             await self._info(path, **kwargs)
+             return True
+         except FileNotFoundError:
+             return False
+
+     async def _info(self, path, **kwargs):
+         raise NotImplementedError
+
+     async def _ls(self, path, detail=True, **kwargs):
+         raise NotImplementedError
+
+     async def _walk(self, path, maxdepth=None, on_error="omit", **kwargs):
+         if maxdepth is not None and maxdepth < 1:
+             raise ValueError("maxdepth must be at least 1")
+
+         path = self._strip_protocol(path)
+         full_dirs = {}
+         dirs = {}
+         files = {}
+
+         detail = kwargs.pop("detail", False)
+         try:
+             listing = await self._ls(path, detail=True, **kwargs)
+         except (FileNotFoundError, OSError) as e:
+             if on_error == "raise":
+                 raise
+             elif callable(on_error):
+                 on_error(e)
+             if detail:
+                 yield path, {}, {}
+             else:
+                 yield path, [], []
+             return
+
+         for info in listing:
+             # each info name must be at least [path]/part , but here
+             # we check also for names like [path]/part/
+             pathname = info["name"].rstrip("/")
+             name = pathname.rsplit("/", 1)[-1]
+             if info["type"] == "directory" and pathname != path:
+                 # do not include "self" path
+                 full_dirs[name] = pathname
+                 dirs[name] = info
+             elif pathname == path:
+                 # file-like with same name as given path
+                 files[""] = info
+             else:
+                 files[name] = info
+
+         if detail:
+             yield path, dirs, files
+         else:
+             yield path, list(dirs), list(files)
+
+         if maxdepth is not None:
+             maxdepth -= 1
+             if maxdepth < 1:
+                 return
+
+         for d in dirs:
+             async for _ in self._walk(
+                 full_dirs[d], maxdepth=maxdepth, detail=detail, **kwargs
+             ):
+                 yield _
+
+     async def _glob(self, path, maxdepth=None, **kwargs):
+         if maxdepth is not None and maxdepth < 1:
+             raise ValueError("maxdepth must be at least 1")
+
+         import re
+
+         seps = (os.path.sep, os.path.altsep) if os.path.altsep else (os.path.sep,)
+         ends_with_sep = path.endswith(seps)  # _strip_protocol strips trailing slash
+         path = self._strip_protocol(path)
+         append_slash_to_dirname = ends_with_sep or path.endswith(
+             tuple(sep + "**" for sep in seps)
+         )
+         idx_star = path.find("*") if path.find("*") >= 0 else len(path)
+         idx_qmark = path.find("?") if path.find("?") >= 0 else len(path)
+         idx_brace = path.find("[") if path.find("[") >= 0 else len(path)
+
+         min_idx = min(idx_star, idx_qmark, idx_brace)
+
+         detail = kwargs.pop("detail", False)
+
+         if not has_magic(path):
+             if await self._exists(path, **kwargs):
+                 if not detail:
+                     return [path]
+                 else:
+                     return {path: await self._info(path, **kwargs)}
+             else:
+                 if not detail:
+                     return []  # glob of non-existent returns empty
+                 else:
+                     return {}
+         elif "/" in path[:min_idx]:
+             min_idx = path[:min_idx].rindex("/")
+             root = path[: min_idx + 1]
+             depth = path[min_idx + 1 :].count("/") + 1
+         else:
+             root = ""
+             depth = path[min_idx + 1 :].count("/") + 1
+
+         if "**" in path:
+             if maxdepth is not None:
+                 idx_double_stars = path.find("**")
+                 depth_double_stars = path[idx_double_stars:].count("/") + 1
+                 depth = depth - depth_double_stars + maxdepth
+             else:
+                 depth = None
+
+         allpaths = await self._find(
+             root, maxdepth=depth, withdirs=True, detail=True, **kwargs
+         )
+
+         pattern = glob_translate(path + ("/" if ends_with_sep else ""))
+         pattern = re.compile(pattern)
+
+         out = {
+             p: info
+             for p, info in sorted(allpaths.items())
+             if pattern.match(
+                 p + "/"
+                 if append_slash_to_dirname and info["type"] == "directory"
+                 else p
+             )
+         }
+
+         if detail:
+             return out
+         else:
+             return list(out)
+
+     async def _du(self, path, total=True, maxdepth=None, **kwargs):
+         sizes = {}
+         # async for?
+         for f in await self._find(path, maxdepth=maxdepth, **kwargs):
+             info = await self._info(f)
+             sizes[info["name"]] = info["size"]
+         if total:
+             return sum(sizes.values())
+         else:
+             return sizes
+
+     async def _find(self, path, maxdepth=None, withdirs=False, **kwargs):
+         path = self._strip_protocol(path)
+         out = {}
+         detail = kwargs.pop("detail", False)
+
+         # Add the root directory if withdirs is requested
+         # This is needed for posix glob compliance
+         if withdirs and path != "" and await self._isdir(path):
+             out[path] = await self._info(path)
+
+         # async for?
+         async for _, dirs, files in self._walk(path, maxdepth, detail=True, **kwargs):
+             if withdirs:
+                 files.update(dirs)
+             out.update({info["name"]: info for name, info in files.items()})
+         if not out and (await self._isfile(path)):
+             # walk works on directories, but find should also return [path]
+             # when path happens to be a file
+             out[path] = {}
+         names = sorted(out)
+         if not detail:
+             return names
+         else:
+             return {name: out[name] for name in names}
+
+     async def _expand_path(self, path, recursive=False, maxdepth=None):
+         if maxdepth is not None and maxdepth < 1:
+             raise ValueError("maxdepth must be at least 1")
+
+         if isinstance(path, str):
+             out = await self._expand_path([path], recursive, maxdepth)
+         else:
+             out = set()
+             path = [self._strip_protocol(p) for p in path]
+             for p in path:  # can gather here
+                 if has_magic(p):
+                     bit = set(await self._glob(p, maxdepth=maxdepth))
+                     out |= bit
+                     if recursive:
+                         # glob call above expanded one depth so if maxdepth is defined
+                         # then decrement it in expand_path call below. If it is zero
+                         # after decrementing then avoid expand_path call.
+                         if maxdepth is not None and maxdepth <= 1:
+                             continue
+                         out |= set(
+                             await self._expand_path(
+                                 list(bit),
+                                 recursive=recursive,
+                                 maxdepth=maxdepth - 1 if maxdepth is not None else None,
+                             )
+                         )
+                     continue
+                 elif recursive:
+                     rec = set(await self._find(p, maxdepth=maxdepth, withdirs=True))
+                     out |= rec
+                 if p not in out and (recursive is False or (await self._exists(p))):
+                     # should only check once, for the root
+                     out.add(p)
+         if not out:
+             raise FileNotFoundError(path)
+         return sorted(out)
+
+     async def _mkdir(self, path, create_parents=True, **kwargs):
+         pass  # not necessary to implement, may not have directories
+
+     async def _makedirs(self, path, exist_ok=False):
+         pass  # not necessary to implement, may not have directories
+
+     async def open_async(self, path, mode="rb", **kwargs):
+         if "b" not in mode or kwargs.get("compression"):
+             raise ValueError
+         raise NotImplementedError
+
+
+ def mirror_sync_methods(obj):
+     """Populate sync and async methods for obj
+
+     For each method will create a sync version if the name refers to an async method
+     (coroutine) and there is no override in the child class; will create an async
+     method for the corresponding sync method if there is no implementation.
+
+     Uses the methods specified in
+     - async_methods: the set that an implementation is expected to provide
+     - default_async_methods: that can be derived from their sync version in
+       AbstractFileSystem
+     - AsyncFileSystem: async-specific default coroutines
+     """
+     from fsspec import AbstractFileSystem
+
+     for method in async_methods + dir(AsyncFileSystem):
+         if not method.startswith("_"):
+             continue
+         smethod = method[1:]
+         if private.match(method):
+             isco = inspect.iscoroutinefunction(getattr(obj, method, None))
+             unsync = getattr(getattr(obj, smethod, False), "__func__", None)
+             is_default = unsync is getattr(AbstractFileSystem, smethod, "")
+             if isco and is_default:
+                 mth = sync_wrapper(getattr(obj, method), obj=obj)
+                 setattr(obj, smethod, mth)
+                 if not mth.__doc__:
+                     mth.__doc__ = getattr(
+                         getattr(AbstractFileSystem, smethod, None), "__doc__", ""
+                     )
+
+
+ class FSSpecCoroutineCancel(Exception):
+     pass
+
+
+ def _dump_running_tasks(
+     printout=True, cancel=True, exc=FSSpecCoroutineCancel, with_task=False
+ ):
+     import traceback
+
+     tasks = [t for t in asyncio.tasks.all_tasks(loop[0]) if not t.done()]
+     if printout:
+         [task.print_stack() for task in tasks]
+     out = [
+         {
+             "locals": task._coro.cr_frame.f_locals,
+             "file": task._coro.cr_frame.f_code.co_filename,
+             "firstline": task._coro.cr_frame.f_code.co_firstlineno,
+             "linelo": task._coro.cr_frame.f_lineno,
+             "stack": traceback.format_stack(task._coro.cr_frame),
+             "task": task if with_task else None,
+         }
+         for task in tasks
+     ]
+     if cancel:
+         for t in tasks:
+             cbs = t._callbacks
+             t.cancel()
+             asyncio.futures.Future.set_exception(t, exc)
+             asyncio.futures.Future.cancel(t)
+             [cb[0](t) for cb in cbs]  # cancels any dependent concurrent.futures
+             try:
+                 t._coro.throw(exc)  # exits coro, unless explicitly handled
+             except exc:
+                 pass
+     return out
+
+
+ class AbstractAsyncStreamedFile(AbstractBufferedFile):
+     # no read buffering, and always auto-commit
+     # TODO: readahead might still be useful here, but needs async version
+
+     async def read(self, length=-1):
+         """
+         Return data from cache, or fetch pieces as necessary
+
+         Parameters
+         ----------
+         length: int (-1)
+             Number of bytes to read; if <0, all remaining bytes.
+         """
+         length = -1 if length is None else int(length)
+         if self.mode != "rb":
+             raise ValueError("File not in read mode")
+         if length < 0:
+             length = self.size - self.loc
+         if self.closed:
+             raise ValueError("I/O operation on closed file.")
+         if length == 0:
+             # don't even bother calling fetch
+             return b""
+         out = await self._fetch_range(self.loc, self.loc + length)
+         self.loc += len(out)
+         return out
+
+     async def write(self, data):
+         """
+         Write data to buffer.
+
+         Buffer only sent on flush() or if buffer is greater than
+         or equal to blocksize.
+
+         Parameters
+         ----------
+         data: bytes
+             Set of bytes to be written.
+         """
+         if self.mode not in {"wb", "ab"}:
+             raise ValueError("File not in write mode")
+         if self.closed:
+             raise ValueError("I/O operation on closed file.")
+         if self.forced:
+             raise ValueError("This file has been force-flushed, can only close")
+         out = self.buffer.write(data)
+         self.loc += out
+         if self.buffer.tell() >= self.blocksize:
+             await self.flush()
+         return out
+
+     async def close(self):
+         """Close file
+
+         Finalizes writes, discards cache
+         """
+         if getattr(self, "_unclosable", False):
+             return
+         if self.closed:
+             return
+         if self.mode == "rb":
+             self.cache = None
+         else:
+             if not self.forced:
+                 await self.flush(force=True)
+
+             if self.fs is not None:
+                 self.fs.invalidate_cache(self.path)
+                 self.fs.invalidate_cache(self.fs._parent(self.path))
+
+         self.closed = True
+
+     async def flush(self, force=False):
+         if self.closed:
+             raise ValueError("Flush on closed file")
+         if force and self.forced:
+             raise ValueError("Force flush cannot be called more than once")
+         if force:
+             self.forced = True
+
+         if self.mode not in {"wb", "ab"}:
+             # no-op to flush on read-mode
+             return
+
+         if not force and self.buffer.tell() < self.blocksize:
+             # Defer write on small block
+             return
+
+         if self.offset is None:
+             # Initialize a multipart upload
+             self.offset = 0
+             try:
+                 await self._initiate_upload()
+             except:
+                 self.closed = True
+                 raise
+
+         if await self._upload_chunk(final=force) is not False:
+             self.offset += self.buffer.seek(0, 2)
+             self.buffer = io.BytesIO()
+
+     async def __aenter__(self):
+         return self
+
+     async def __aexit__(self, exc_type, exc_val, exc_tb):
+         await self.close()
+
+     async def _fetch_range(self, start, end):
+         raise NotImplementedError
+
+     async def _initiate_upload(self):
+         pass
+
+     async def _upload_chunk(self, final=False):
+         raise NotImplementedError
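
Note: the sync/sync_wrapper/mirror_sync_methods machinery above is what lets blocking code drive coroutines on fsspec's dedicated IO thread. A minimal usage sketch (the _double coroutine is hypothetical):

    import fsspec.asyn

    async def _double(x):
        # hypothetical coroutine standing in for a backend method
        return 2 * x

    # submit the coroutine to the long-running fsspec IO loop and block on it
    result = fsspec.asyn.sync(fsspec.asyn.get_loop(), _double, 21)
    assert result == 42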
.venv/lib/python3.13/site-packages/fsspec/callbacks.py ADDED
@@ -0,0 +1,324 @@
+ from functools import wraps
+
+
+ class Callback:
+     """
+     Base class and interface for callback mechanism
+
+     This class can be used directly for monitoring file transfers by
+     providing ``callback=Callback(hooks=...)`` (see the ``hooks`` argument,
+     below), or subclassed for more specialised behaviour.
+
+     Parameters
+     ----------
+     size: int (optional)
+         Nominal quantity for the value that corresponds to a complete
+         transfer, e.g., total number of tiles or total number of
+         bytes
+     value: int (0)
+         Starting internal counter value
+     hooks: dict or None
+         A dict of named functions to be called on each update. The signature
+         of these must be ``f(size, value, **kwargs)``
+     """
+
+     def __init__(self, size=None, value=0, hooks=None, **kwargs):
+         self.size = size
+         self.value = value
+         self.hooks = hooks or {}
+         self.kw = kwargs
+
+     def __enter__(self):
+         return self
+
+     def __exit__(self, *exc_args):
+         self.close()
+
+     def close(self):
+         """Close callback."""
+
+     def branched(self, path_1, path_2, **kwargs):
+         """
+         Return callback for child transfers
+
+         If this callback is operating at a higher level, e.g., put, which may
+         trigger transfers that can also be monitored. The function returns a callback
+         that has to be passed to the child method, e.g., put_file,
+         as `callback=` argument.
+
+         The implementation uses `callback.branch` for compatibility.
+         When implementing callbacks, it is recommended to override this function instead
+         of `branch` and avoid calling `super().branched(...)`.
+
+         Prefer using this function over `branch`.
+
+         Parameters
+         ----------
+         path_1: str
+             Child's source path
+         path_2: str
+             Child's destination path
+         **kwargs:
+             Arbitrary keyword arguments
+
+         Returns
+         -------
+         callback: Callback
+             A callback instance to be passed to the child method
+         """
+         self.branch(path_1, path_2, kwargs)
+         # mutate kwargs so that we can force the caller to pass "callback=" explicitly
+         return kwargs.pop("callback", DEFAULT_CALLBACK)
+
+     def branch_coro(self, fn):
+         """
+         Wraps a coroutine, and passes a new child callback to it.
+         """
+
+         @wraps(fn)
+         async def func(path1, path2: str, **kwargs):
+             with self.branched(path1, path2, **kwargs) as child:
+                 return await fn(path1, path2, callback=child, **kwargs)
+
+         return func
+
+     def set_size(self, size):
+         """
+         Set the internal maximum size attribute
+
+         Usually called if not initially set at instantiation. Note that this
+         triggers a ``call()``.
+
+         Parameters
+         ----------
+         size: int
+         """
+         self.size = size
+         self.call()
+
+     def absolute_update(self, value):
+         """
+         Set the internal value state
+
+         Triggers ``call()``
+
+         Parameters
+         ----------
+         value: int
+         """
+         self.value = value
+         self.call()
+
+     def relative_update(self, inc=1):
+         """
+         Delta increment the internal counter
+
+         Triggers ``call()``
+
+         Parameters
+         ----------
+         inc: int
+         """
+         self.value += inc
+         self.call()
+
+     def call(self, hook_name=None, **kwargs):
+         """
+         Execute hook(s) with current state
+
+         Each function is passed the internal size and current value
+
+         Parameters
+         ----------
+         hook_name: str or None
+             If given, execute on this hook
+         kwargs: passed on to (all) hook(s)
+         """
+         if not self.hooks:
+             return
+         kw = self.kw.copy()
+         kw.update(kwargs)
+         if hook_name:
+             if hook_name not in self.hooks:
+                 return
+             return self.hooks[hook_name](self.size, self.value, **kw)
+         for hook in self.hooks.values() or []:
+             hook(self.size, self.value, **kw)
+
+     def wrap(self, iterable):
+         """
+         Wrap an iterable to call ``relative_update`` on each iteration
+
+         Parameters
+         ----------
+         iterable: Iterable
+             The iterable that is being wrapped
+         """
+         for item in iterable:
+             self.relative_update()
+             yield item
+
+     def branch(self, path_1, path_2, kwargs):
+         """
+         Set callbacks for child transfers
+
+         If this callback is operating at a higher level, e.g., put, which may
+         trigger transfers that can also be monitored. The passed kwargs are
+         to be *mutated* to add ``callback=``, if this class supports branching
+         to children.
+
+         Parameters
+         ----------
+         path_1: str
+             Child's source path
+         path_2: str
+             Child's destination path
+         kwargs: dict
+             arguments passed to child method, e.g., put_file.
+
+         Returns
+         -------
+
+         """
+         return None
+
+     def no_op(self, *_, **__):
+         pass
+
+     def __getattr__(self, item):
+         """
+         If undefined methods are called on this class, nothing happens
+         """
+         return self.no_op
+
+     @classmethod
+     def as_callback(cls, maybe_callback=None):
+         """Transform callback=... into Callback instance
+
+         For the special value of ``None``, return the global instance of
+         ``NoOpCallback``. This is an alternative to including
+         ``callback=DEFAULT_CALLBACK`` directly in a method signature.
+         """
+         if maybe_callback is None:
+             return DEFAULT_CALLBACK
+         return maybe_callback
+
+
+ class NoOpCallback(Callback):
+     """
+     This implementation of Callback does exactly nothing
+     """
+
+     def call(self, *args, **kwargs):
+         return None
+
+
+ class DotPrinterCallback(Callback):
+     """
+     Simple example Callback implementation
+
+     Almost identical to Callback with a hook that prints a char; here we
+     demonstrate how the outer layer may print "#" and the inner layer "."
+     """
+
+     def __init__(self, chr_to_print="#", **kwargs):
+         self.chr = chr_to_print
+         super().__init__(**kwargs)
+
+     def branch(self, path_1, path_2, kwargs):
+         """Mutate kwargs to add new instance with different print char"""
+         kwargs["callback"] = DotPrinterCallback(".")
+
+     def call(self, **kwargs):
+         """Just outputs a character"""
+         print(self.chr, end="")
+
+
+ class TqdmCallback(Callback):
+     """
+     A callback to display a progress bar using tqdm
+
+     Parameters
+     ----------
+     tqdm_kwargs : dict, (optional)
+         Any argument accepted by the tqdm constructor.
+         See the `tqdm doc <https://tqdm.github.io/docs/tqdm/#__init__>`_.
+         Will be forwarded to `tqdm_cls`.
+     tqdm_cls: (optional)
+         subclass of `tqdm.tqdm`. If not passed, it will default to `tqdm.tqdm`.
+
+     Examples
+     --------
+     >>> import fsspec
+     >>> from fsspec.callbacks import TqdmCallback
+     >>> fs = fsspec.filesystem("memory")
+     >>> path2distant_data = "/your-path"
+     >>> fs.upload(
+             ".",
+             path2distant_data,
+             recursive=True,
+             callback=TqdmCallback(),
+         )
+
+     You can forward args to tqdm using the ``tqdm_kwargs`` parameter.
+
+     >>> fs.upload(
+             ".",
+             path2distant_data,
+             recursive=True,
+             callback=TqdmCallback(tqdm_kwargs={"desc": "Your tqdm description"}),
+         )
+
+     You can also customize the progress bar by passing a subclass of `tqdm`.
+
+     .. code-block:: python
+
+         class TqdmFormat(tqdm):
+             '''Provides a `total_time` format parameter'''
+             @property
+             def format_dict(self):
+                 d = super().format_dict
+                 total_time = d["elapsed"] * (d["total"] or 0) / max(d["n"], 1)
+                 d.update(total_time=self.format_interval(total_time) + " in total")
+                 return d
+
+     >>> with TqdmCallback(
+             tqdm_kwargs={
+                 "desc": "desc",
+                 "bar_format": "{total_time}: {percentage:.0f}%|{bar}{r_bar}",
+             },
+             tqdm_cls=TqdmFormat,
+         ) as callback:
+             fs.upload(".", path2distant_data, recursive=True, callback=callback)
+     """
+
+     def __init__(self, tqdm_kwargs=None, *args, **kwargs):
+         try:
+             from tqdm import tqdm
+
+         except ImportError as exce:
+             raise ImportError(
+                 "Using TqdmCallback requires tqdm to be installed"
+             ) from exce
+
+         self._tqdm_cls = kwargs.pop("tqdm_cls", tqdm)
+         self._tqdm_kwargs = tqdm_kwargs or {}
+         self.tqdm = None
+         super().__init__(*args, **kwargs)
+
+     def call(self, *args, **kwargs):
+         if self.tqdm is None:
+             self.tqdm = self._tqdm_cls(total=self.size, **self._tqdm_kwargs)
+         self.tqdm.total = self.size
+         self.tqdm.update(self.value - self.tqdm.n)
+
+     def close(self):
+         if self.tqdm is not None:
+             self.tqdm.close()
+             self.tqdm = None
+
+     def __del__(self):
+         return self.close()
+
+
+ DEFAULT_CALLBACK = _DEFAULT_CALLBACK = NoOpCallback()
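
Note: a minimal sketch of the hooks mechanism described in the Callback docstring above — each named hook is called as f(size, value, **kwargs) whenever the counter updates (the print_progress hook is hypothetical):

    from fsspec.callbacks import Callback

    def print_progress(size, value, **kwargs):
        # hypothetical hook; receives the current size and counter value
        print(f"{value}/{size}")

    cb = Callback(size=3, hooks={"progress": print_progress})
    for _ in range(3):
        cb.relative_update(1)  # triggers every hook: prints 1/3, 2/3, 3/3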
.venv/lib/python3.13/site-packages/fsspec/compression.py ADDED
@@ -0,0 +1,182 @@
+ """Helper functions for a standard streaming compression API"""
+
+ from zipfile import ZipFile
+
+ import fsspec.utils
+ from fsspec.spec import AbstractBufferedFile
+
+
+ def noop_file(file, mode, **kwargs):
+     return file
+
+
+ # TODO: files should also be available as contexts
+ # should be functions of the form func(infile, mode=, **kwargs) -> file-like
+ compr = {None: noop_file}
+
+
+ def register_compression(name, callback, extensions, force=False):
+     """Register an "inferable" file compression type.
+
+     Registers transparent file compression type for use with fsspec.open.
+     Compression can be specified by name in open, or "infer"-ed for any files
+     ending with the given extensions.
+
+     Args:
+         name: (str) The compression type name. Eg. "gzip".
+         callback: A callable of form (infile, mode, **kwargs) -> file-like.
+             Accepts an input file-like object, the target mode and kwargs.
+             Returns a wrapped file-like object.
+         extensions: (str, Iterable[str]) A file extension, or list of file
+             extensions for which to infer this compression scheme. Eg. "gz".
+         force: (bool) Force re-registration of compression type or extensions.
+
+     Raises:
+         ValueError: If name or extensions already registered, and not force.
+
+     """
+     if isinstance(extensions, str):
+         extensions = [extensions]
+
+     # Validate registration
+     if name in compr and not force:
+         raise ValueError(f"Duplicate compression registration: {name}")
+
+     for ext in extensions:
+         if ext in fsspec.utils.compressions and not force:
+             raise ValueError(f"Duplicate compression file extension: {ext} ({name})")
+
+     compr[name] = callback
+
+     for ext in extensions:
+         fsspec.utils.compressions[ext] = name
+
+
+ def unzip(infile, mode="rb", filename=None, **kwargs):
+     if "r" not in mode:
+         filename = filename or "file"
+         z = ZipFile(infile, mode="w", **kwargs)
+         fo = z.open(filename, mode="w")
+         fo.close = lambda closer=fo.close: closer() or z.close()
+         return fo
+     z = ZipFile(infile)
+     if filename is None:
+         filename = z.namelist()[0]
+     return z.open(filename, mode="r", **kwargs)
+
+
+ register_compression("zip", unzip, "zip")
+
+ try:
+     from bz2 import BZ2File
+ except ImportError:
+     pass
+ else:
+     register_compression("bz2", BZ2File, "bz2")
+
+ try:  # pragma: no cover
+     from isal import igzip
+
+     def isal(infile, mode="rb", **kwargs):
+         return igzip.IGzipFile(fileobj=infile, mode=mode, **kwargs)
+
+     register_compression("gzip", isal, "gz")
+ except ImportError:
+     from gzip import GzipFile
+
+     register_compression(
+         "gzip", lambda f, **kwargs: GzipFile(fileobj=f, **kwargs), "gz"
+     )
+
+ try:
+     from lzma import LZMAFile
+
+     register_compression("lzma", LZMAFile, "lzma")
+     register_compression("xz", LZMAFile, "xz")
+ except ImportError:
+     pass
+
+ try:
+     import lzmaffi
+
+     register_compression("lzma", lzmaffi.LZMAFile, "lzma", force=True)
+     register_compression("xz", lzmaffi.LZMAFile, "xz", force=True)
+ except ImportError:
+     pass
+
+
+ class SnappyFile(AbstractBufferedFile):
+     def __init__(self, infile, mode, **kwargs):
+         import snappy
+
+         super().__init__(
+             fs=None, path="snappy", mode=mode.strip("b") + "b", size=999999999, **kwargs
+         )
+         self.infile = infile
+         if "r" in mode:
+             self.codec = snappy.StreamDecompressor()
+         else:
+             self.codec = snappy.StreamCompressor()
+
+     def _upload_chunk(self, final=False):
+         self.buffer.seek(0)
+         out = self.codec.add_chunk(self.buffer.read())
+         self.infile.write(out)
+         return True
+
+     def seek(self, loc, whence=0):
+         raise NotImplementedError("SnappyFile is not seekable")
+
+     def seekable(self):
+         return False
+
+     def _fetch_range(self, start, end):
+         """Get the specified set of bytes from remote"""
+         data = self.infile.read(end - start)
+         return self.codec.decompress(data)
+
+
+ try:
+     import snappy
+
+     snappy.compress(b"")
+     # Snappy may use the .sz file extension, but this is not part of the
+     # standard implementation.
+     register_compression("snappy", SnappyFile, [])
+
+ except (ImportError, NameError, AttributeError):
+     pass
+
+ try:
+     import lz4.frame
+
+     register_compression("lz4", lz4.frame.open, "lz4")
+ except ImportError:
+     pass
+
+ try:
+     # zstd in the standard library for python >= 3.14
+     from compression.zstd import ZstdFile
+
+     register_compression("zstd", ZstdFile, "zst")
+
+ except ImportError:
+     try:
+         import zstandard as zstd
+
+         def zstandard_file(infile, mode="rb"):
+             if "r" in mode:
+                 cctx = zstd.ZstdDecompressor()
+                 return cctx.stream_reader(infile)
+             else:
+                 cctx = zstd.ZstdCompressor(level=10)
+                 return cctx.stream_writer(infile)
+
+         register_compression("zstd", zstandard_file, "zst")
+     except ImportError:
+         pass
+
+
+ def available_compressions():
+     """Return a list of the implemented compressions."""
+     return list(compr)
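To illustrate ``register_compression``, a minimal sketch that registers gzip under a second, hypothetical name and extension (``mygz`` is not part of fsspec):

    from gzip import GzipFile

    import fsspec
    from fsspec.compression import available_compressions, register_compression

    # hypothetical codec name/extension, wrapping stdlib gzip
    register_compression("mygz", lambda f, **kw: GzipFile(fileobj=f, **kw), "mygz")
    assert "mygz" in available_compressions()

    # compression="infer" now maps the .mygz suffix to the new codec
    with fsspec.open("memory://demo.mygz", "wb", compression="infer") as f:
        f.write(b"hello")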
.venv/lib/python3.13/site-packages/fsspec/config.py ADDED
@@ -0,0 +1,131 @@
+ from __future__ import annotations
+
+ import configparser
+ import json
+ import os
+ import warnings
+ from typing import Any
+
+ conf: dict[str, dict[str, Any]] = {}
+ default_conf_dir = os.path.join(os.path.expanduser("~"), ".config/fsspec")
+ conf_dir = os.environ.get("FSSPEC_CONFIG_DIR", default_conf_dir)
+
+
+ def set_conf_env(conf_dict, envdict=os.environ):
+     """Set config values from environment variables
+
+     Looks for variables of the form ``FSSPEC_<protocol>`` and
+     ``FSSPEC_<protocol>_<kwarg>``. For ``FSSPEC_<protocol>`` the value is parsed
+     as a json dictionary and used to ``update`` the config of the
+     corresponding protocol. For ``FSSPEC_<protocol>_<kwarg>`` there is no
+     attempt to convert the string value, but the kwarg keys will be lower-cased.
+
+     The ``FSSPEC_<protocol>_<kwarg>`` variables are applied after the
+     ``FSSPEC_<protocol>`` ones.
+
+     Parameters
+     ----------
+     conf_dict : dict(str, dict)
+         This dict will be mutated
+     envdict : dict-like(str, str)
+         Source for the values - usually the real environment
+     """
+     kwarg_keys = []
+     for key in envdict:
+         if key.startswith("FSSPEC_") and len(key) > 7 and key[7] != "_":
+             if key.count("_") > 1:
+                 kwarg_keys.append(key)
+                 continue
+             try:
+                 value = json.loads(envdict[key])
+             except json.decoder.JSONDecodeError as ex:
+                 warnings.warn(
+                     f"Ignoring environment variable {key} due to a parse failure: {ex}"
+                 )
+             else:
+                 if isinstance(value, dict):
+                     _, proto = key.split("_", 1)
+                     conf_dict.setdefault(proto.lower(), {}).update(value)
+                 else:
+                     warnings.warn(
+                         f"Ignoring environment variable {key} due to not being a dict:"
+                         f" {type(value)}"
+                     )
+         elif key.startswith("FSSPEC"):
+             warnings.warn(
+                 f"Ignoring environment variable {key} due to having an unexpected name"
+             )
+
+     for key in kwarg_keys:
+         _, proto, kwarg = key.split("_", 2)
+         conf_dict.setdefault(proto.lower(), {})[kwarg.lower()] = envdict[key]
+
+
+ def set_conf_files(cdir, conf_dict):
+     """Set config values from files
+
+     Scans for INI and JSON files in the given directory, and uses their
+     contents to set the config. In case of repeated values, later values
+     win.
+
+     In the case of INI files, all values are strings, and these will not
+     be converted.
+
+     Parameters
+     ----------
+     cdir : str
+         Directory to search
+     conf_dict : dict(str, dict)
+         This dict will be mutated
+     """
+     if not os.path.isdir(cdir):
+         return
+     allfiles = sorted(os.listdir(cdir))
+     for fn in allfiles:
+         if fn.endswith(".ini"):
+             ini = configparser.ConfigParser()
+             ini.read(os.path.join(cdir, fn))
+             for key in ini:
+                 if key == "DEFAULT":
+                     continue
+                 conf_dict.setdefault(key, {}).update(dict(ini[key]))
+         if fn.endswith(".json"):
+             with open(os.path.join(cdir, fn)) as f:
+                 js = json.load(f)
+             for key in js:
+                 conf_dict.setdefault(key, {}).update(dict(js[key]))
+
+
+ def apply_config(cls, kwargs, conf_dict=None):
+     """Supply default values for kwargs when instantiating class
+
+     Augments the passed kwargs, by finding entries in the config dict
+     which match the class's ``.protocol`` attribute (one or more str)
+
+     Parameters
+     ----------
+     cls : file system implementation
+     kwargs : dict
+     conf_dict : dict of dict
+         Typically this is the global configuration
+
+     Returns
+     -------
+     dict : the modified set of kwargs
+     """
+     if conf_dict is None:
+         conf_dict = conf
+     protos = cls.protocol if isinstance(cls.protocol, (tuple, list)) else [cls.protocol]
+     kw = {}
+     for proto in protos:
+         # default kwargs from the current state of the config
+         if proto in conf_dict:
+             kw.update(conf_dict[proto])
+     # explicit kwargs always win
+     kw.update(**kwargs)
+     kwargs = kw
+     return kwargs
+
+
+ set_conf_files(conf_dir, conf)
+ set_conf_env(conf)
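The environment-variable handling above can be exercised directly; a minimal sketch (the protocol name and values are hypothetical):

    import json

    from fsspec.config import set_conf_env

    conf = {}
    env = {
        "FSSPEC_MYPROTO": json.dumps({"port": 415}),  # parsed as a JSON dict
        "FSSPEC_MYPROTO_USERNAME": "user",            # kwarg form, applied second
    }
    set_conf_env(conf, envdict=env)
    assert conf == {"myproto": {"port": 415, "username": "user"}}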
.venv/lib/python3.13/site-packages/fsspec/conftest.py ADDED
@@ -0,0 +1,55 @@
+ import os
+ import shutil
+ import subprocess
+ import sys
+ import time
+
+ import pytest
+
+ import fsspec
+ from fsspec.implementations.cached import CachingFileSystem
+
+
+ @pytest.fixture()
+ def m():
+     """
+     Fixture providing a memory filesystem.
+     """
+     m = fsspec.filesystem("memory")
+     m.store.clear()
+     m.pseudo_dirs.clear()
+     m.pseudo_dirs.append("")
+     try:
+         yield m
+     finally:
+         m.store.clear()
+         m.pseudo_dirs.clear()
+         m.pseudo_dirs.append("")
+
+
+ @pytest.fixture
+ def ftp_writable(tmpdir):
+     """
+     Fixture providing a writable FTP filesystem.
+     """
+     pytest.importorskip("pyftpdlib")
+     from fsspec.implementations.ftp import FTPFileSystem
+
+     FTPFileSystem.clear_instance_cache()  # remove lingering connections
+     CachingFileSystem.clear_instance_cache()
+     d = str(tmpdir)
+     with open(os.path.join(d, "out"), "wb") as f:
+         f.write(b"hello" * 10000)
+     P = subprocess.Popen(
+         [sys.executable, "-m", "pyftpdlib", "-d", d, "-u", "user", "-P", "pass", "-w"]
+     )
+     try:
+         time.sleep(1)
+         yield "localhost", 2121, "user", "pass"
+     finally:
+         P.terminate()
+         P.wait()
+         try:
+             shutil.rmtree(tmpdir)
+         except Exception:
+             pass
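A sketch of how the ``m`` fixture above would be used from a test module (the test itself is hypothetical):

    def test_roundtrip(m):
        # the fixture yields a clean in-memory filesystem per test
        m.pipe("/a/b.txt", b"data")
        assert m.cat("/a/b.txt") == b"data"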
.venv/lib/python3.13/site-packages/fsspec/core.py ADDED
@@ -0,0 +1,743 @@
+ from __future__ import annotations
+
+ import io
+ import logging
+ import os
+ import re
+ from glob import has_magic
+ from pathlib import Path
+
+ # for backwards compat, we export cache things from here too
+ from fsspec.caching import (  # noqa: F401
+     BaseCache,
+     BlockCache,
+     BytesCache,
+     MMapCache,
+     ReadAheadCache,
+     caches,
+ )
+ from fsspec.compression import compr
+ from fsspec.config import conf
+ from fsspec.registry import filesystem, get_filesystem_class
+ from fsspec.utils import (
+     _unstrip_protocol,
+     build_name_function,
+     infer_compression,
+     stringify_path,
+ )
+
+ logger = logging.getLogger("fsspec")
+
+
+ class OpenFile:
+     """
+     File-like object to be used in a context
+
+     Can layer (buffered) text-mode and compression over any file-system, which
+     are typically binary-only.
+
+     These instances are safe to serialize, as the low-level file object
+     is not created until invoked using ``with``.
+
+     Parameters
+     ----------
+     fs: FileSystem
+         The file system to use for opening the file. Should be a subclass or duck-type
+         with ``fsspec.spec.AbstractFileSystem``
+     path: str
+         Location to open
+     mode: str like 'rb', optional
+         Mode of the opened file
+     compression: str or None, optional
+         Compression to apply
+     encoding: str or None, optional
+         The encoding to use if opened in text mode.
+     errors: str or None, optional
+         How to handle encoding errors if opened in text mode.
+     newline: None or str
+         Passed to TextIOWrapper in text mode, how to handle line endings.
+     autoopen: bool
+         If True, calls open() immediately. Mostly used by pickle
+     pos: int
+         If given and autoopen is True, seek to this location immediately
+     """
+
+     def __init__(
+         self,
+         fs,
+         path,
+         mode="rb",
+         compression=None,
+         encoding=None,
+         errors=None,
+         newline=None,
+     ):
+         self.fs = fs
+         self.path = path
+         self.mode = mode
+         self.compression = get_compression(path, compression)
+         self.encoding = encoding
+         self.errors = errors
+         self.newline = newline
+         self.fobjects = []
+
+     def __reduce__(self):
+         return (
+             OpenFile,
+             (
+                 self.fs,
+                 self.path,
+                 self.mode,
+                 self.compression,
+                 self.encoding,
+                 self.errors,
+                 self.newline,
+             ),
+         )
+
+     def __repr__(self):
+         return f"<OpenFile '{self.path}'>"
+
+     def __enter__(self):
+         mode = self.mode.replace("t", "").replace("b", "") + "b"
+
+         try:
+             f = self.fs.open(self.path, mode=mode)
+         except FileNotFoundError as e:
+             if has_magic(self.path):
+                 raise FileNotFoundError(
+                     "%s not found. The URL contains glob characters: you maybe needed\n"
+                     "to pass expand=True in fsspec.open() or the storage_options of \n"
+                     "your library. You can also set the config value 'open_expand'\n"
+                     "before import, or fsspec.core.DEFAULT_EXPAND at runtime, to True.",
+                     self.path,
+                 ) from e
+             raise
+
+         self.fobjects = [f]
+
+         if self.compression is not None:
+             compress = compr[self.compression]
+             f = compress(f, mode=mode[0])
+             self.fobjects.append(f)
+
+         if "b" not in self.mode:
+             # assume, for example, that 'r' is equivalent to 'rt' as in builtin
+             f = PickleableTextIOWrapper(
+                 f, encoding=self.encoding, errors=self.errors, newline=self.newline
+             )
+             self.fobjects.append(f)
+
+         return self.fobjects[-1]
+
+     def __exit__(self, *args):
+         self.close()
+
+     @property
+     def full_name(self):
+         return _unstrip_protocol(self.path, self.fs)
+
+     def open(self):
+         """Materialise this as a real open file without context
+
+         The OpenFile object should be explicitly closed to avoid enclosed file
+         instances persisting. You must, therefore, keep a reference to the OpenFile
+         during the life of the file-like it generates.
+         """
+         return self.__enter__()
+
+     def close(self):
+         """Close all encapsulated file objects"""
+         for f in reversed(self.fobjects):
+             if "r" not in self.mode and not f.closed:
+                 f.flush()
+             f.close()
+         self.fobjects.clear()
+
+
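A minimal sketch of the deferred-open behaviour documented above, using the in-memory filesystem:

    import pickle

    import fsspec
    from fsspec.core import OpenFile

    of = OpenFile(fsspec.filesystem("memory"), "/demo.txt", mode="wt")
    roundtripped = pickle.loads(pickle.dumps(of))  # safe: no handle exists yet

    with roundtripped as f:  # the real file object is only created here
        f.write("hello")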
+ class OpenFiles(list):
+     """List of OpenFile instances
+
+     Can be used in a single context, which opens and closes all of the
+     contained files. Normal list access to get the elements works as
+     normal.
+
+     A special case is made for caching filesystems - the files will
+     be down/uploaded together at the start or end of the context, and
+     this may happen concurrently, if the target filesystem supports it.
+     """
+
+     def __init__(self, *args, mode="rb", fs=None):
+         self.mode = mode
+         self.fs = fs
+         self.files = []
+         super().__init__(*args)
+
+     def __enter__(self):
+         if self.fs is None:
+             raise ValueError("Context has already been used")
+
+         fs = self.fs
+         while True:
+             if hasattr(fs, "open_many"):
+                 # check for concurrent cache download; or set up for upload
+                 self.files = fs.open_many(self)
+                 return self.files
+             if hasattr(fs, "fs") and fs.fs is not None:
+                 fs = fs.fs
+             else:
+                 break
+         return [s.__enter__() for s in self]
+
+     def __exit__(self, *args):
+         fs = self.fs
+         [s.__exit__(*args) for s in self]
+         if "r" not in self.mode:
+             while True:
+                 if hasattr(fs, "open_many"):
+                     # check for concurrent cache upload
+                     fs.commit_many(self.files)
+                     return
+                 if hasattr(fs, "fs") and fs.fs is not None:
+                     fs = fs.fs
+                 else:
+                     break
+
+     def __getitem__(self, item):
+         out = super().__getitem__(item)
+         if isinstance(item, slice):
+             return OpenFiles(out, mode=self.mode, fs=self.fs)
+         return out
+
+     def __repr__(self):
+         return f"<List of {len(self)} OpenFile instances>"
+
+
+ def open_files(
+     urlpath,
+     mode="rb",
+     compression=None,
+     encoding="utf8",
+     errors=None,
+     name_function=None,
+     num=1,
+     protocol=None,
+     newline=None,
+     auto_mkdir=True,
+     expand=True,
+     **kwargs,
+ ):
+     """Given a path or paths, return a list of ``OpenFile`` objects.
+
+     For writing, a str path must contain the "*" character, which will be filled
+     in by increasing numbers, e.g., "part*" -> "part1", "part2" if num=2.
+
+     For either reading or writing, can instead provide explicit list of paths.
+
+     Parameters
+     ----------
+     urlpath: string or list
+         Absolute or relative filepath(s). Prefix with a protocol like ``s3://``
+         to read from alternative filesystems. To read from multiple files you
+         can pass a globstring or a list of paths, with the caveat that they
+         must all have the same protocol.
+     mode: 'rb', 'wt', etc.
+     compression: string or None
+         If given, open file using compression codec. Can either be a compression
+         name (a key in ``fsspec.compression.compr``) or "infer" to guess the
+         compression from the filename suffix.
+     encoding: str
+         For text mode only
+     errors: None or str
+         Passed to TextIOWrapper in text mode
+     name_function: function or None
+         if opening a set of files for writing, those files do not yet exist,
+         so we need to generate their names by formatting the urlpath for
+         each sequence number
+     num: int [1]
+         if writing mode, number of files we expect to create (passed to
+         name_function)
+     protocol: str or None
+         If given, overrides the protocol found in the URL.
+     newline: bytes or None
+         Used for line terminator in text mode. If None, uses system default;
+         if blank, uses no translation.
+     auto_mkdir: bool (True)
+         If in write mode, this will ensure the target directory exists before
+         writing, by calling ``fs.mkdirs(exist_ok=True)``.
+     expand: bool
+     **kwargs: dict
+         Extra options that make sense to a particular storage connection, e.g.
+         host, port, username, password, etc.
+
+     Examples
+     --------
+     >>> files = open_files('2015-*-*.csv')  # doctest: +SKIP
+     >>> files = open_files(
+     ...     's3://bucket/2015-*-*.csv.gz', compression='gzip'
+     ... )  # doctest: +SKIP
+
+     Returns
+     -------
+     An ``OpenFiles`` instance, which is a list of ``OpenFile`` objects that can
+     be used as a single context
+
+     Notes
+     -----
+     For a full list of the available protocols and the implementations that
+     they map across to see the latest online documentation:
+
+     - For implementations built into ``fsspec`` see
+       https://filesystem-spec.readthedocs.io/en/latest/api.html#built-in-implementations
+     - For implementations in separate packages see
+       https://filesystem-spec.readthedocs.io/en/latest/api.html#other-known-implementations
+     """
+     fs, fs_token, paths = get_fs_token_paths(
+         urlpath,
+         mode,
+         num=num,
+         name_function=name_function,
+         storage_options=kwargs,
+         protocol=protocol,
+         expand=expand,
+     )
+     if fs.protocol == "file":
+         fs.auto_mkdir = auto_mkdir
+     elif "r" not in mode and auto_mkdir:
+         parents = {fs._parent(path) for path in paths}
+         for parent in parents:
+             try:
+                 fs.makedirs(parent, exist_ok=True)
+             except PermissionError:
+                 pass
+     return OpenFiles(
+         [
+             OpenFile(
+                 fs,
+                 path,
+                 mode=mode,
+                 compression=compression,
+                 encoding=encoding,
+                 errors=errors,
+                 newline=newline,
+             )
+             for path in paths
+         ],
+         mode=mode,
+         fs=fs,
+     )
+
+
+ def _un_chain(path, kwargs):
+     # Avoid a circular import
+     from fsspec.implementations.cached import CachingFileSystem
+
+     if "::" in path:
+         x = re.compile(".*[^a-z]+.*")  # test for non protocol-like single word
+         bits = []
+         for p in path.split("::"):
+             if "://" in p or x.match(p):
+                 bits.append(p)
+             else:
+                 bits.append(p + "://")
+     else:
+         bits = [path]
+     # [[url, protocol, kwargs], ...]
+     out = []
+     previous_bit = None
+     kwargs = kwargs.copy()
+     for bit in reversed(bits):
+         protocol = kwargs.pop("protocol", None) or split_protocol(bit)[0] or "file"
+         cls = get_filesystem_class(protocol)
+         extra_kwargs = cls._get_kwargs_from_urls(bit)
+         kws = kwargs.pop(protocol, {})
+         if bit is bits[0]:
+             kws.update(kwargs)
+         kw = dict(
+             **{k: v for k, v in extra_kwargs.items() if k not in kws or v != kws[k]},
+             **kws,
+         )
+         bit = cls._strip_protocol(bit)
+         if "target_protocol" not in kw and issubclass(cls, CachingFileSystem):
+             bit = previous_bit
+         out.append((bit, protocol, kw))
+         previous_bit = bit
+     out.reverse()
+     return out
+
+
+ def url_to_fs(url, **kwargs):
+     """
+     Turn fully-qualified and potentially chained URL into filesystem instance
+
+     Parameters
+     ----------
+     url : str
+         The fsspec-compatible URL
+     **kwargs: dict
+         Extra options that make sense to a particular storage connection, e.g.
+         host, port, username, password, etc.
+
+     Returns
+     -------
+     filesystem : FileSystem
+         The new filesystem discovered from ``url`` and created with
+         ``**kwargs``.
+     urlpath : str
+         The file-systems-specific URL for ``url``.
+     """
+     url = stringify_path(url)
+     # non-FS arguments that appear in fsspec.open()
+     # inspect could keep this in sync with open()'s signature
+     known_kwargs = {
+         "compression",
+         "encoding",
+         "errors",
+         "expand",
+         "mode",
+         "name_function",
+         "newline",
+         "num",
+     }
+     kwargs = {k: v for k, v in kwargs.items() if k not in known_kwargs}
+     chain = _un_chain(url, kwargs)
+     inkwargs = {}
+     # Reverse iterate the chain, creating a nested target_* structure
+     for i, ch in enumerate(reversed(chain)):
+         urls, protocol, kw = ch
+         if i == len(chain) - 1:
+             inkwargs = dict(**kw, **inkwargs)
+             continue
+         inkwargs["target_options"] = dict(**kw, **inkwargs)
+         inkwargs["target_protocol"] = protocol
+         inkwargs["fo"] = urls
+     urlpath, protocol, _ = chain[0]
+     fs = filesystem(protocol, **inkwargs)
+     return fs, urlpath
+
+
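A minimal sketch of ``url_to_fs`` on plain and chained URLs:

    from fsspec.core import url_to_fs

    fs, path = url_to_fs("memory://bucket/key.csv")
    print(type(fs).__name__, path)  # MemoryFileSystem /bucket/key.csv

    # each "::" segment becomes a nested target_protocol/target_options layer
    fs2, path2 = url_to_fs("simplecache::memory://bucket/key.csv")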
+ DEFAULT_EXPAND = conf.get("open_expand", False)
+
+
+ def open(
+     urlpath,
+     mode="rb",
+     compression=None,
+     encoding="utf8",
+     errors=None,
+     protocol=None,
+     newline=None,
+     expand=None,
+     **kwargs,
+ ):
+     """Given a path or paths, return one ``OpenFile`` object.
+
+     Parameters
+     ----------
+     urlpath: string or list
+         Absolute or relative filepath. Prefix with a protocol like ``s3://``
+         to read from alternative filesystems. Should not include glob
+         character(s).
+     mode: 'rb', 'wt', etc.
+     compression: string or None
+         If given, open file using compression codec. Can either be a compression
+         name (a key in ``fsspec.compression.compr``) or "infer" to guess the
+         compression from the filename suffix.
+     encoding: str
+         For text mode only
+     errors: None or str
+         Passed to TextIOWrapper in text mode
+     protocol: str or None
+         If given, overrides the protocol found in the URL.
+     newline: bytes or None
+         Used for line terminator in text mode. If None, uses system default;
+         if blank, uses no translation.
+     expand: bool or None
+         Whether to regard file paths containing special glob characters as needing
+         expansion (finding the first match) or absolute. Setting False allows using
+         paths which do embed such characters. If None (default), this argument
+         takes its value from the DEFAULT_EXPAND module variable, which takes
+         its initial value from the "open_expand" config value at startup, which will
+         be False if not set.
+     **kwargs: dict
+         Extra options that make sense to a particular storage connection, e.g.
+         host, port, username, password, etc.
+
+     Examples
+     --------
+     >>> openfile = open('2015-01-01.csv')  # doctest: +SKIP
+     >>> openfile = open(
+     ...     's3://bucket/2015-01-01.csv.gz', compression='gzip'
+     ... )  # doctest: +SKIP
+     >>> with openfile as f:
+     ...     df = pd.read_csv(f)  # doctest: +SKIP
+     ...
+
+     Returns
+     -------
+     ``OpenFile`` object.
+
+     Notes
+     -----
+     For a full list of the available protocols and the implementations that
+     they map across to see the latest online documentation:
+
+     - For implementations built into ``fsspec`` see
+       https://filesystem-spec.readthedocs.io/en/latest/api.html#built-in-implementations
+     - For implementations in separate packages see
+       https://filesystem-spec.readthedocs.io/en/latest/api.html#other-known-implementations
+     """
+     expand = DEFAULT_EXPAND if expand is None else expand
+     out = open_files(
+         urlpath=[urlpath],
+         mode=mode,
+         compression=compression,
+         encoding=encoding,
+         errors=errors,
+         protocol=protocol,
+         newline=newline,
+         expand=expand,
+         **kwargs,
+     )
+     if not out:
+         raise FileNotFoundError(urlpath)
+     return out[0]
+
+
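A minimal sketch of ``fsspec.open`` in text mode against the in-memory filesystem:

    import fsspec

    with fsspec.open("memory://logs/app.txt", "wt", encoding="utf8") as f:
        f.write("line1\n")

    with fsspec.open("memory://logs/app.txt", "rt") as f:
        assert f.read() == "line1\n"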
+ def open_local(
+     url: str | list[str] | Path | list[Path],
+     mode: str = "rb",
+     **storage_options: dict,
+ ) -> str | list[str]:
+     """Open file(s) which can be resolved to local
+
+     For files which either are local, or get downloaded upon open
+     (e.g., by file caching)
+
+     Parameters
+     ----------
+     url: str or list(str)
+     mode: str
+         Must be read mode
+     storage_options:
+         passed on to FS for or used by open_files (e.g., compression)
+     """
+     if "r" not in mode:
+         raise ValueError("Can only ensure local files when reading")
+     of = open_files(url, mode=mode, **storage_options)
+     if not getattr(of[0].fs, "local_file", False):
+         raise ValueError(
+             "open_local can only be used on a filesystem which"
+             " has attribute local_file=True"
+         )
+     with of as files:
+         paths = [f.name for f in files]
+     if (isinstance(url, str) and not has_magic(url)) or isinstance(url, Path):
+         return paths[0]
+     return paths
+
+
+ def get_compression(urlpath, compression):
+     if compression == "infer":
+         compression = infer_compression(urlpath)
+     if compression is not None and compression not in compr:
+         raise ValueError(f"Compression type {compression} not supported")
+     return compression
+
+
+ def split_protocol(urlpath):
+     """Return protocol, path pair"""
+     urlpath = stringify_path(urlpath)
+     if "://" in urlpath:
+         protocol, path = urlpath.split("://", 1)
+         if len(protocol) > 1:
+             # excludes Windows paths
+             return protocol, path
+     if urlpath.startswith("data:"):
+         return urlpath.split(":", 1)
+     return None, urlpath
+
+
+ def strip_protocol(urlpath):
+     """Return only path part of full URL, according to appropriate backend"""
+     protocol, _ = split_protocol(urlpath)
+     cls = get_filesystem_class(protocol)
+     return cls._strip_protocol(urlpath)
+
+
+ def expand_paths_if_needed(paths, mode, num, fs, name_function):
+     """Expand paths if they have a ``*`` in them (write mode) or any of ``*?[]``
+     in them (read mode).
+
+     :param paths: list of paths
+     mode: str
+         Mode in which to open files.
+     num: int
+         If opening in writing mode, number of files we expect to create.
+     fs: filesystem object
+     name_function: callable
+         If opening in writing mode, this callable is used to generate path
+         names. Names are generated for each partition by
+         ``urlpath.replace('*', name_function(partition_index))``.
+     :return: list of paths
+     """
+     expanded_paths = []
+     paths = list(paths)
+
+     if "w" in mode:  # write mode
+         if sum(1 for p in paths if "*" in p) > 1:
+             raise ValueError(
+                 "When writing data, only one filename mask can be specified."
+             )
+         num = max(num, len(paths))
+
+         for curr_path in paths:
+             if "*" in curr_path:
+                 # expand using name_function
+                 expanded_paths.extend(_expand_paths(curr_path, name_function, num))
+             else:
+                 expanded_paths.append(curr_path)
+         # if we generated more paths than asked for, trim the list
+         if len(expanded_paths) > num:
+             expanded_paths = expanded_paths[:num]
+
+     else:  # read mode
+         for curr_path in paths:
+             if has_magic(curr_path):
+                 # expand using glob
+                 expanded_paths.extend(fs.glob(curr_path))
+             else:
+                 expanded_paths.append(curr_path)
+
+     return expanded_paths
+
+
+ def get_fs_token_paths(
+     urlpath,
+     mode="rb",
+     num=1,
+     name_function=None,
+     storage_options=None,
+     protocol=None,
+     expand=True,
+ ):
+     """Filesystem, deterministic token, and paths from a urlpath and options.
+
+     Parameters
+     ----------
+     urlpath: string or iterable
+         Absolute or relative filepath, URL (may include protocols like
+         ``s3://``), or globstring pointing to data.
+     mode: str, optional
+         Mode in which to open files.
+     num: int, optional
+         If opening in writing mode, number of files we expect to create.
+     name_function: callable, optional
+         If opening in writing mode, this callable is used to generate path
+         names. Names are generated for each partition by
+         ``urlpath.replace('*', name_function(partition_index))``.
+     storage_options: dict, optional
+         Additional keywords to pass to the filesystem class.
+     protocol: str or None
+         To override the protocol specifier in the URL
+     expand: bool
+         Expand string paths for writing, assuming the path is a directory
+     """
+     if isinstance(urlpath, (list, tuple, set)):
+         if not urlpath:
+             raise ValueError("empty urlpath sequence")
+         urlpath0 = stringify_path(next(iter(urlpath)))
+     else:
+         urlpath0 = stringify_path(urlpath)
+     storage_options = storage_options or {}
+     if protocol:
+         storage_options["protocol"] = protocol
+     chain = _un_chain(urlpath0, storage_options or {})
+     inkwargs = {}
+     # Reverse iterate the chain, creating a nested target_* structure
+     for i, ch in enumerate(reversed(chain)):
+         urls, nested_protocol, kw = ch
+         if i == len(chain) - 1:
+             inkwargs = dict(**kw, **inkwargs)
+             continue
+         inkwargs["target_options"] = dict(**kw, **inkwargs)
+         inkwargs["target_protocol"] = nested_protocol
+         inkwargs["fo"] = urls
+     paths, protocol, _ = chain[0]
+     fs = filesystem(protocol, **inkwargs)
+     if isinstance(urlpath, (list, tuple, set)):
+         pchains = [
+             _un_chain(stringify_path(u), storage_options or {})[0] for u in urlpath
+         ]
+         if len({pc[1] for pc in pchains}) > 1:
+             raise ValueError("Protocol mismatch getting fs from %s", urlpath)
+         paths = [pc[0] for pc in pchains]
+     else:
+         paths = fs._strip_protocol(paths)
+     if isinstance(paths, (list, tuple, set)):
+         if expand:
+             paths = expand_paths_if_needed(paths, mode, num, fs, name_function)
+         elif not isinstance(paths, list):
+             paths = list(paths)
+     else:
+         if ("w" in mode or "x" in mode) and expand:
+             paths = _expand_paths(paths, name_function, num)
+         elif "*" in paths:
+             paths = [f for f in sorted(fs.glob(paths)) if not fs.isdir(f)]
+         else:
+             paths = [paths]
+
+     return fs, fs._fs_token, paths
+
+
+ def _expand_paths(path, name_function, num):
+     if isinstance(path, str):
+         if path.count("*") > 1:
+             raise ValueError("Output path spec must contain exactly one '*'.")
+         elif "*" not in path:
+             path = os.path.join(path, "*.part")
+
+         if name_function is None:
+             name_function = build_name_function(num - 1)
+
+         paths = [path.replace("*", name_function(i)) for i in range(num)]
+         if paths != sorted(paths):
+             logger.warning(
+                 "In order to preserve order between partitions"
+                 " paths created with ``name_function`` should "
+                 "sort to partition order"
+             )
+     elif isinstance(path, (tuple, list)):
+         assert len(path) == num
+         paths = list(path)
+     else:
+         raise ValueError(
+             "Path should be either\n"
+             "1. A list of paths: ['foo.json', 'bar.json', ...]\n"
+             "2. A directory: 'foo/\n"
+             "3. A path with a '*' in it: 'foo.*.json'"
+         )
+     return paths
+
+
+ class PickleableTextIOWrapper(io.TextIOWrapper):
+     """TextIOWrapper cannot be pickled. This solves it.
+
+     Requires that ``buffer`` be pickleable, which all instances of
+     AbstractBufferedFile are.
+     """
+
+     def __init__(
+         self,
+         buffer,
+         encoding=None,
+         errors=None,
+         newline=None,
+         line_buffering=False,
+         write_through=False,
+     ):
+         self.args = buffer, encoding, errors, newline, line_buffering, write_through
+         super().__init__(*self.args)
+
+     def __reduce__(self):
+         return PickleableTextIOWrapper, self.args
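A minimal sketch of how ``_expand_paths`` fills the ``*`` mask, with the default and a custom ``name_function``:

    from fsspec.core import _expand_paths

    print(_expand_paths("out/part-*.csv", None, 3))
    # ['out/part-0.csv', 'out/part-1.csv', 'out/part-2.csv']

    print(_expand_paths("out/part-*.csv", lambda i: f"{i:03d}", 2))
    # ['out/part-000.csv', 'out/part-001.csv']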
.venv/lib/python3.13/site-packages/fsspec/dircache.py ADDED
@@ -0,0 +1,98 @@
+ import time
+ from collections.abc import MutableMapping
+ from functools import lru_cache
+
+
+ class DirCache(MutableMapping):
+     """
+     Caching of directory listings, in a structure like::
+
+         {"path0": [
+             {"name": "path0/file0",
+              "size": 123,
+              "type": "file",
+              ...
+             },
+             {"name": "path0/file1",
+             },
+             ...
+             ],
+          "path1": [...]
+         }
+
+     Parameters to this class control listing expiry or indeed turn
+     caching off
+     """
+
+     def __init__(
+         self,
+         use_listings_cache=True,
+         listings_expiry_time=None,
+         max_paths=None,
+         **kwargs,
+     ):
+         """
+
+         Parameters
+         ----------
+         use_listings_cache: bool
+             If False, this cache never returns items, but always reports KeyError,
+             and setting items has no effect
+         listings_expiry_time: int or float (optional)
+             Time in seconds that a listing is considered valid. If None,
+             listings do not expire.
+         max_paths: int (optional)
+             The number of most recent listings that are considered valid; 'recent'
+             refers to when the entry was set.
+         """
+         self._cache = {}
+         self._times = {}
+         if max_paths:
+             self._q = lru_cache(max_paths + 1)(lambda key: self._cache.pop(key, None))
+         self.use_listings_cache = use_listings_cache
+         self.listings_expiry_time = listings_expiry_time
+         self.max_paths = max_paths
+
+     def __getitem__(self, item):
+         if self.listings_expiry_time is not None:
+             if self._times.get(item, 0) - time.time() < -self.listings_expiry_time:
+                 del self._cache[item]
+         if self.max_paths:
+             self._q(item)
+         return self._cache[item]  # maybe raises KeyError
+
+     def clear(self):
+         self._cache.clear()
+
+     def __len__(self):
+         return len(self._cache)
+
+     def __contains__(self, item):
+         try:
+             self[item]
+             return True
+         except KeyError:
+             return False
+
+     def __setitem__(self, key, value):
+         if not self.use_listings_cache:
+             return
+         if self.max_paths:
+             self._q(key)
+         self._cache[key] = value
+         if self.listings_expiry_time is not None:
+             self._times[key] = time.time()
+
+     def __delitem__(self, key):
+         del self._cache[key]
+
+     def __iter__(self):
+         entries = list(self._cache)
+
+         return (k for k in entries if k in self)
+
+     def __reduce__(self):
+         return (
+             DirCache,
+             (self.use_listings_cache, self.listings_expiry_time, self.max_paths),
+         )
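A minimal sketch of the expiry behaviour of ``DirCache``:

    import time

    from fsspec.dircache import DirCache

    cache = DirCache(listings_expiry_time=0.5)
    cache["/data"] = [{"name": "/data/a", "size": 1, "type": "file"}]
    assert "/data" in cache
    time.sleep(0.6)
    assert "/data" not in cache  # expired entries are dropped on access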
.venv/lib/python3.13/site-packages/fsspec/fuse.py ADDED
@@ -0,0 +1,324 @@
+ import argparse
+ import logging
+ import os
+ import stat
+ import threading
+ import time
+ from errno import EIO, ENOENT
+
+ from fuse import FUSE, FuseOSError, LoggingMixIn, Operations
+
+ from fsspec import __version__
+ from fsspec.core import url_to_fs
+
+ logger = logging.getLogger("fsspec.fuse")
+
+
+ class FUSEr(Operations):
+     def __init__(self, fs, path, ready_file=False):
+         self.fs = fs
+         self.cache = {}
+         self.root = path.rstrip("/") + "/"
+         self.counter = 0
+         logger.info("Starting FUSE at %s", path)
+         self._ready_file = ready_file
+
+     def getattr(self, path, fh=None):
+         logger.debug("getattr %s", path)
+         if self._ready_file and path in ["/.fuse_ready", ".fuse_ready"]:
+             return {"type": "file", "st_size": 5}
+
+         path = "".join([self.root, path.lstrip("/")]).rstrip("/")
+         try:
+             info = self.fs.info(path)
+         except FileNotFoundError as exc:
+             raise FuseOSError(ENOENT) from exc
+
+         data = {"st_uid": info.get("uid", 1000), "st_gid": info.get("gid", 1000)}
+         perm = info.get("mode", 0o777)
+
+         if info["type"] != "file":
+             data["st_mode"] = stat.S_IFDIR | perm
+             data["st_size"] = 0
+             data["st_blksize"] = 0
+         else:
+             data["st_mode"] = stat.S_IFREG | perm
+             data["st_size"] = info["size"]
+             data["st_blksize"] = 5 * 2**20
+             data["st_nlink"] = 1
+         data["st_atime"] = info["atime"] if "atime" in info else time.time()
+         data["st_ctime"] = info["ctime"] if "ctime" in info else time.time()
+         data["st_mtime"] = info["mtime"] if "mtime" in info else time.time()
+         return data
+
+     def readdir(self, path, fh):
+         logger.debug("readdir %s", path)
+         path = "".join([self.root, path.lstrip("/")])
+         files = self.fs.ls(path, False)
+         files = [os.path.basename(f.rstrip("/")) for f in files]
+         return [".", ".."] + files
+
+     def mkdir(self, path, mode):
+         path = "".join([self.root, path.lstrip("/")])
+         self.fs.mkdir(path)
+         return 0
+
+     def rmdir(self, path):
+         path = "".join([self.root, path.lstrip("/")])
+         self.fs.rmdir(path)
+         return 0
+
+     def read(self, path, size, offset, fh):
+         logger.debug("read %s", (path, size, offset))
+         if self._ready_file and path in ["/.fuse_ready", ".fuse_ready"]:
+             # status indicator
+             return b"ready"
+
+         f = self.cache[fh]
+         f.seek(offset)
+         out = f.read(size)
+         return out
+
+     def write(self, path, data, offset, fh):
+         logger.debug("write %s", (path, offset))
+         f = self.cache[fh]
+         f.seek(offset)
+         f.write(data)
+         return len(data)
+
+     def create(self, path, flags, fi=None):
+         logger.debug("create %s", (path, flags))
+         fn = "".join([self.root, path.lstrip("/")])
+         self.fs.touch(fn)  # OS will want to get attributes immediately
+         f = self.fs.open(fn, "wb")
+         self.cache[self.counter] = f
+         self.counter += 1
+         return self.counter - 1
+
+     def open(self, path, flags):
+         logger.debug("open %s", (path, flags))
+         fn = "".join([self.root, path.lstrip("/")])
+         if flags % 2 == 0:
+             # read
+             mode = "rb"
+         else:
+             # write/create
+             mode = "wb"
+         self.cache[self.counter] = self.fs.open(fn, mode)
+         self.counter += 1
+         return self.counter - 1
+
+     def truncate(self, path, length, fh=None):
+         fn = "".join([self.root, path.lstrip("/")])
+         if length != 0:
+             raise NotImplementedError
+         # maybe should be no-op since open with write sets size to zero anyway
+         self.fs.touch(fn)
+
+     def unlink(self, path):
+         fn = "".join([self.root, path.lstrip("/")])
+         try:
+             self.fs.rm(fn, False)
+         except (OSError, FileNotFoundError) as exc:
+             raise FuseOSError(EIO) from exc
+
+     def release(self, path, fh):
+         try:
+             if fh in self.cache:
+                 f = self.cache[fh]
+                 f.close()
+                 self.cache.pop(fh)
+         except Exception as e:
+             print(e)
+         return 0
+
+     def chmod(self, path, mode):
+         if hasattr(self.fs, "chmod"):
+             path = "".join([self.root, path.lstrip("/")])
+             return self.fs.chmod(path, mode)
+         raise NotImplementedError
+
+
+ def run(
+     fs,
+     path,
+     mount_point,
+     foreground=True,
+     threads=False,
+     ready_file=False,
+     ops_class=FUSEr,
+ ):
+     """Mount stuff in a local directory
+
+     This uses fusepy to make it appear as if a given path on an fsspec
+     instance is in fact resident within the local file-system.
+
+     This requires that fusepy be installed, and that FUSE be available on
+     the system (typically requiring a package to be installed with
+     apt, yum, brew, etc.).
+
+     Parameters
+     ----------
+     fs: file-system instance
+         From one of the compatible implementations
+     path: str
+         Location on that file-system to regard as the root directory to
+         mount. Note that you typically should include the terminating "/"
+         character.
+     mount_point: str
+         An empty directory on the local file-system where the contents of
+         the remote path will appear.
+     foreground: bool
+         Whether or not calling this function will block. Operation will
+         typically be more stable if True.
+     threads: bool
+         Whether or not to create threads when responding to file operations
+         within the mounted directory. Operation will typically be more
+         stable if False.
+     ready_file: bool
+         Whether the FUSE process is ready. The ``.fuse_ready`` file will
+         exist in the ``mount_point`` directory if True. Debugging purpose.
+     ops_class: FUSEr or Subclass of FUSEr
+         To override the default behavior of FUSEr. For Example, logging
+         to file.
+
+     """
+     func = lambda: FUSE(
+         ops_class(fs, path, ready_file=ready_file),
+         mount_point,
+         nothreads=not threads,
+         foreground=foreground,
+     )
+     if not foreground:
+         th = threading.Thread(target=func)
+         th.daemon = True
+         th.start()
+         return th
+     else:  # pragma: no cover
+         try:
+             func()
+         except KeyboardInterrupt:
+             pass
+
+
+ def main(args):
+     """Mount filesystem from chained URL to MOUNT_POINT.
+
+     Examples:
+
+         python3 -m fsspec.fuse memory /usr/share /tmp/mem
+
+         python3 -m fsspec.fuse local /tmp/source /tmp/local \\
+                 -l /tmp/fsspecfuse.log
+
+     You can also mount chained-URLs and use special settings:
+
+         python3 -m fsspec.fuse 'filecache::zip::file://data.zip' \\
+                 / /tmp/zip \\
+                 -o 'filecache-cache_storage=/tmp/simplecache'
+
+     You can specify the type of the setting by using `[int]` or `[bool]`,
+     (`true`, `yes`, `1` represents the Boolean value `True`):
+
+         python3 -m fsspec.fuse 'simplecache::ftp://ftp1.at.proftpd.org' \\
+                 /historic/packages/RPMS /tmp/ftp \\
+                 -o 'simplecache-cache_storage=/tmp/simplecache' \\
+                 -o 'simplecache-check_files=false[bool]' \\
+                 -o 'ftp-listings_expiry_time=60[int]' \\
+                 -o 'ftp-username=anonymous' \\
+                 -o 'ftp-password=xieyanbo'
+     """
+
+     class RawDescriptionArgumentParser(argparse.ArgumentParser):
+         def format_help(self):
+             usage = super().format_help()
+             parts = usage.split("\n\n")
+             parts[1] = self.description.rstrip()
+             return "\n\n".join(parts)
+
+     parser = RawDescriptionArgumentParser(prog="fsspec.fuse", description=main.__doc__)
+     parser.add_argument("--version", action="version", version=__version__)
+     parser.add_argument("url", type=str, help="fs url")
+     parser.add_argument("source_path", type=str, help="source directory in fs")
+     parser.add_argument("mount_point", type=str, help="local directory")
+     parser.add_argument(
+         "-o",
+         "--option",
+         action="append",
+         help="Any options of protocol included in the chained URL",
+     )
+     parser.add_argument(
+         "-l", "--log-file", type=str, help="Logging FUSE debug info (Default: '')"
+     )
+     parser.add_argument(
+         "-f",
+         "--foreground",
+         action="store_false",
+         help="Running in foreground or not (Default: False)",
+     )
+     parser.add_argument(
+         "-t",
+         "--threads",
+         action="store_false",
+         help="Running with threads support (Default: False)",
+     )
+     parser.add_argument(
+         "-r",
+         "--ready-file",
+         action="store_false",
+         help="The `.fuse_ready` file will exist after FUSE is ready. "
+         "(Debugging purpose, Default: False)",
+     )
+     args = parser.parse_args(args)
+
+     kwargs = {}
+     for item in args.option or []:
+         key, sep, value = item.partition("=")
+         if not sep:
+             parser.error(message=f"Wrong option: {item!r}")
+         val = value.lower()
+         if val.endswith("[int]"):
+             value = int(value[: -len("[int]")])
+         elif val.endswith("[bool]"):
+             value = val[: -len("[bool]")] in ["1", "yes", "true"]
+
+         if "-" in key:
+             fs_name, setting_name = key.split("-", 1)
+             if fs_name in kwargs:
+                 kwargs[fs_name][setting_name] = value
+             else:
+                 kwargs[fs_name] = {setting_name: value}
+         else:
+             kwargs[key] = value
+
+     if args.log_file:
+         logging.basicConfig(
+             level=logging.DEBUG,
+             filename=args.log_file,
+             format="%(asctime)s %(message)s",
+         )
+
+         class LoggingFUSEr(FUSEr, LoggingMixIn):
+             pass
+
+         fuser = LoggingFUSEr
+     else:
+         fuser = FUSEr
+
+     fs, url_path = url_to_fs(args.url, **kwargs)
+     logger.debug("Mounting %s to %s", url_path, str(args.mount_point))
+     run(
+         fs,
+         args.source_path,
+         args.mount_point,
+         foreground=args.foreground,
+         threads=args.threads,
+         ready_file=args.ready_file,
+         ops_class=fuser,
+     )
+
+
+ if __name__ == "__main__":
+     import sys
+
+     main(sys.argv[1:])
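A minimal sketch of mounting programmatically via ``run`` (assumes fusepy and a system FUSE are installed, and that ``/tmp/mnt`` is an existing empty directory):

    import fsspec
    from fsspec.fuse import run

    fs = fsspec.filesystem("memory")
    fs.pipe("/data/hello.txt", b"hi")

    # foreground=False starts a daemon thread and does not block
    th = run(fs, "/data/", "/tmp/mnt", foreground=False)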
.venv/lib/python3.13/site-packages/fsspec/generic.py ADDED
@@ -0,0 +1,394 @@
+ from __future__ import annotations
+
+ import inspect
+ import logging
+ import os
+ import shutil
+ import uuid
+
+ from .asyn import AsyncFileSystem, _run_coros_in_chunks, sync_wrapper
+ from .callbacks import DEFAULT_CALLBACK
+ from .core import filesystem, get_filesystem_class, split_protocol, url_to_fs
+
+ _generic_fs = {}
+ logger = logging.getLogger("fsspec.generic")
+
+
+ def set_generic_fs(protocol, **storage_options):
+     """Populate the dict used for method=="generic" lookups"""
+     _generic_fs[protocol] = filesystem(protocol, **storage_options)
+
+
+ def _resolve_fs(url, method, protocol=None, storage_options=None):
+     """Pick instance of backend FS"""
+     url = url[0] if isinstance(url, (list, tuple)) else url
+     protocol = protocol or split_protocol(url)[0]
+     storage_options = storage_options or {}
+     if method == "default":
+         return filesystem(protocol)
+     if method == "generic":
+         return _generic_fs[protocol]
+     if method == "current":
+         cls = get_filesystem_class(protocol)
+         return cls.current()
+     if method == "options":
+         fs, _ = url_to_fs(url, **storage_options.get(protocol, {}))
+         return fs
+     raise ValueError(f"Unknown FS resolution method: {method}")
+
+
+ def rsync(
+     source,
+     destination,
+     delete_missing=False,
+     source_field="size",
+     dest_field="size",
+     update_cond="different",
+     inst_kwargs=None,
+     fs=None,
+     **kwargs,
+ ):
+     """Sync files between two directory trees
+
+     (experimental)
+
+     Parameters
+     ----------
+     source: str
+         Root of the directory tree to take files from. This must be a directory, but
+         do not include any terminating "/" character
+     destination: str
+         Root path to copy into. The contents of this location should be
+         identical to the contents of ``source`` when done. This will be made a
+         directory, and the terminal "/" should not be included.
+     delete_missing: bool
+         If there are paths in the destination that don't exist in the
+         source and this is True, delete them. Otherwise, leave them alone.
+     source_field: str | callable
+         If ``update_cond`` is "different", this is the key in the info
+         of source files to consider for difference. May be a function of the
+         info dict.
+     dest_field: str | callable
+         If ``update_cond`` is "different", this is the key in the info
+         of destination files to consider for difference. May be a function of
+         the info dict.
+     update_cond: "different"|"always"|"never"
+         If "always", every file is copied, regardless of whether it exists in
+         the destination. If "never", files that exist in the destination are
+         not copied again. If "different" (default), only copy if the info
+         fields given by ``source_field`` and ``dest_field`` (usually "size")
+         are different. Other comparisons may be added in the future.
+     inst_kwargs: dict|None
+         If ``fs`` is None, use this set of keyword arguments to make a
+         GenericFileSystem instance
+     fs: GenericFileSystem|None
+         Instance to use if explicitly given. The instance defines how to
+         make downstream file system instances from paths.
+
+     Returns
+     -------
+     dict of the copy operations that were performed, {source: destination}
+     """
+     fs = fs or GenericFileSystem(**(inst_kwargs or {}))
+     source = fs._strip_protocol(source)
+     destination = fs._strip_protocol(destination)
+     allfiles = fs.find(source, withdirs=True, detail=True)
+     if not fs.isdir(source):
+         raise ValueError("Can only rsync on a directory")
+     otherfiles = fs.find(destination, withdirs=True, detail=True)
+     dirs = [
+         a
+         for a, v in allfiles.items()
+         if v["type"] == "directory" and a.replace(source, destination) not in otherfiles
+     ]
+     logger.debug(f"{len(dirs)} directories to create")
+     if dirs:
+         fs.make_many_dirs(
+             [dirn.replace(source, destination) for dirn in dirs], exist_ok=True
+         )
+     allfiles = {a: v for a, v in allfiles.items() if v["type"] == "file"}
+     logger.debug(f"{len(allfiles)} files to consider for copy")
+     to_delete = [
+         o
+         for o, v in otherfiles.items()
+         if o.replace(destination, source) not in allfiles and v["type"] == "file"
+     ]
+     for k, v in allfiles.copy().items():
+         otherfile = k.replace(source, destination)
+         if otherfile in otherfiles:
+             if update_cond == "always":
+                 allfiles[k] = otherfile
+             elif update_cond == "different":
+                 inf1 = source_field(v) if callable(source_field) else v[source_field]
+                 v2 = otherfiles[otherfile]
+                 inf2 = dest_field(v2) if callable(dest_field) else v2[dest_field]
+                 if inf1 != inf2:
+                     # details mismatch, make copy
+                     allfiles[k] = otherfile
+                 else:
+                     # details match, don't copy
+                     allfiles.pop(k)
+         else:
+             # file not in target yet
+             allfiles[k] = otherfile
+     logger.debug(f"{len(allfiles)} files to copy")
+     if allfiles:
+         source_files, target_files = zip(*allfiles.items())
+         fs.cp(source_files, target_files, **kwargs)
+     logger.debug(f"{len(to_delete)} files to delete")
+     if delete_missing and to_delete:
+         fs.rm(to_delete)
+     return allfiles
+
+
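A minimal sketch of ``rsync`` between two local trees (the paths are hypothetical; the source must already exist as a directory):

    from fsspec.generic import rsync

    copied = rsync("file:///tmp/src", "file:///tmp/dst", delete_missing=True)
    # -> {source_path: destination_path, ...} for the files actually copied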
+ class GenericFileSystem(AsyncFileSystem):
+     """Wrapper over all other FS types
+
+     <experimental!>
+
+     This implementation is a single unified interface to be able to run FS operations
+     over generic URLs, and dispatch to the specific implementations using the URL
+     protocol prefix.
+
+     Note: instances of this FS are always async, even if you never use it with any async
+     backend.
+     """
+
+     protocol = "generic"  # there is no real reason to ever use a protocol with this FS
+
+     def __init__(self, default_method="default", storage_options=None, **kwargs):
+         """
+
+         Parameters
+         ----------
+         default_method: str (optional)
+             Defines how to configure backend FS instances. Options are:
+             - "default": instantiate like FSClass(), with no
+               extra arguments; this is the default instance of that FS, and can be
+               configured via the config system
+             - "generic": takes instances from the `_generic_fs` dict in this module,
+               which you must populate before use. Keys are by protocol
+             - "options": expects storage_options, a dict mapping protocol to
+               kwargs to use when constructing the filesystem
+             - "current": takes the most recently instantiated version of each FS
+         """
+         self.method = default_method
+         self.st_opts = storage_options
+         super().__init__(**kwargs)
+
+     def _parent(self, path):
+         fs = _resolve_fs(path, self.method, storage_options=self.st_opts)
+         return fs.unstrip_protocol(fs._parent(path))
+
+     def _strip_protocol(self, path):
+         # normalization only
+         fs = _resolve_fs(path, self.method, storage_options=self.st_opts)
+         return fs.unstrip_protocol(fs._strip_protocol(path))
+
+     async def _find(self, path, maxdepth=None, withdirs=False, detail=False, **kwargs):
+         fs = _resolve_fs(path, self.method, storage_options=self.st_opts)
+         if fs.async_impl:
+             out = await fs._find(
+                 path, maxdepth=maxdepth, withdirs=withdirs, detail=True, **kwargs
+             )
+         else:
+             out = fs.find(
+                 path, maxdepth=maxdepth, withdirs=withdirs, detail=True, **kwargs
+             )
+         result = {}
+         for k, v in out.items():
+             v = v.copy()  # don't corrupt target FS dircache
+             name = fs.unstrip_protocol(k)
+             v["name"] = name
+             result[name] = v
+         if detail:
+             return result
+         return list(result)
+
+     async def _info(self, url, **kwargs):
+         fs = _resolve_fs(url, self.method)
+         if fs.async_impl:
+             out = await fs._info(url, **kwargs)
+         else:
+             out = fs.info(url, **kwargs)
+         out = out.copy()  # don't edit originals
+         out["name"] = fs.unstrip_protocol(out["name"])
+         return out
+
+     async def _ls(
+         self,
+         url,
+         detail=True,
+         **kwargs,
+     ):
+         fs = _resolve_fs(url, self.method)
+         if fs.async_impl:
+             out = await fs._ls(url, detail=True, **kwargs)
+         else:
+             out = fs.ls(url, detail=True, **kwargs)
+         out = [o.copy() for o in out]  # don't edit originals
+         for o in out:
+             o["name"] = fs.unstrip_protocol(o["name"])
+         if detail:
+             return out
+         else:
+             return [o["name"] for o in out]
+
+     async def _cat_file(
+         self,
+         url,
+         **kwargs,
+     ):
+         fs = _resolve_fs(url, self.method)
+         if fs.async_impl:
+             return await fs._cat_file(url, **kwargs)
+         else:
+             return fs.cat_file(url, **kwargs)
+
+     async def _pipe_file(
+         self,
+         path,
+         value,
+         **kwargs,
+     ):
+         fs = _resolve_fs(path, self.method, storage_options=self.st_opts)
+         if fs.async_impl:
+             return await fs._pipe_file(path, value, **kwargs)
+         else:
+             return fs.pipe_file(path, value, **kwargs)
+
+     async def _rm(self, url, **kwargs):
+         urls = url
+         if isinstance(urls, str):
+             urls = [urls]
+         fs = _resolve_fs(urls[0], self.method)
+         if fs.async_impl:
+             await fs._rm(urls, **kwargs)
+         else:
+             fs.rm(url, **kwargs)
+
+     async def _makedirs(self, path, exist_ok=False):
+         logger.debug("Make dir %s", path)
+         fs = _resolve_fs(path, self.method, storage_options=self.st_opts)
+         if fs.async_impl:
+             await fs._makedirs(path, exist_ok=exist_ok)
+         else:
+             fs.makedirs(path, exist_ok=exist_ok)
+
+     def rsync(self, source, destination, **kwargs):
+         """Sync files between two directory trees
+
+         See `func:rsync` for more details.
+         """
+         rsync(source, destination, fs=self, **kwargs)
+
+     async def _cp_file(
+         self,
+         url,
+         url2,
+         blocksize=2**20,
+         callback=DEFAULT_CALLBACK,
+         tempdir: str | None = None,
+         **kwargs,
+     ):
+         fs = _resolve_fs(url, self.method)
+         fs2 = _resolve_fs(url2, self.method)
+         if fs is fs2:
+             # pure remote
+             if fs.async_impl:
+                 return await fs._copy(url, url2, **kwargs)
+             else:
+                 return fs.copy(url, url2, **kwargs)
+         await copy_file_op(fs, [url], fs2, [url2], tempdir, 1, on_error="raise")
+
+     async def _make_many_dirs(self, urls, exist_ok=True):
+         fs = _resolve_fs(urls[0], self.method)
+         if fs.async_impl:
+             coros = [fs._makedirs(u, exist_ok=exist_ok) for u in urls]
+             await _run_coros_in_chunks(coros)
+         else:
+             for u in urls:
+                 fs.makedirs(u, exist_ok=exist_ok)
+
+     make_many_dirs = sync_wrapper(_make_many_dirs)
+
+     async def _copy(
+         self,
+         path1: list[str],
+         path2: list[str],
+         recursive: bool = False,
+         on_error: str = "ignore",
+         maxdepth: int | None = None,
322
+ batch_size: int | None = None,
323
+ tempdir: str | None = None,
324
+ **kwargs,
325
+ ):
326
+ # TODO: special case for one FS being local, which can use get/put
327
+ # TODO: special case for one being memFS, which can use cat/pipe
328
+ if recursive:
329
+ raise NotImplementedError("Please use fsspec.generic.rsync")
330
+ path1 = [path1] if isinstance(path1, str) else path1
331
+ path2 = [path2] if isinstance(path2, str) else path2
332
+
333
+ fs = _resolve_fs(path1, self.method)
334
+ fs2 = _resolve_fs(path2, self.method)
335
+
336
+ if fs is fs2:
337
+ if fs.async_impl:
338
+ return await fs._copy(path1, path2, **kwargs)
339
+ else:
340
+ return fs.copy(path1, path2, **kwargs)
341
+
342
+ await copy_file_op(
343
+ fs, path1, fs2, path2, tempdir, batch_size, on_error=on_error
344
+ )
345
+
346
+
347
+ async def copy_file_op(
348
+ fs1, url1, fs2, url2, tempdir=None, batch_size=20, on_error="ignore"
349
+ ):
350
+ import tempfile
351
+
352
+ tempdir = tempdir or tempfile.mkdtemp()
353
+ try:
354
+ coros = [
355
+ _copy_file_op(
356
+ fs1,
357
+ u1,
358
+ fs2,
359
+ u2,
360
+ os.path.join(tempdir, uuid.uuid4().hex),
361
+ )
362
+ for u1, u2 in zip(url1, url2)
363
+ ]
364
+ out = await _run_coros_in_chunks(
365
+ coros, batch_size=batch_size, return_exceptions=True
366
+ )
367
+ finally:
368
+ shutil.rmtree(tempdir)
369
+ if on_error == "return":
370
+ return out
371
+ elif on_error == "raise":
372
+ for o in out:
373
+ if isinstance(o, Exception):
374
+ raise o
375
+
376
+
377
+ async def _copy_file_op(fs1, url1, fs2, url2, local, on_error="ignore"):
378
+ if fs1.async_impl:
379
+ await fs1._get_file(url1, local)
380
+ else:
381
+ fs1.get_file(url1, local)
382
+ if fs2.async_impl:
383
+ await fs2._put_file(local, url2)
384
+ else:
385
+ fs2.put_file(local, url2)
386
+ os.unlink(local)
387
+ logger.debug("Copy %s -> %s; done", url1, url2)
388
+
389
+
390
+ async def maybe_await(cor):
391
+ if inspect.iscoroutine(cor):
392
+ return await cor
393
+ else:
394
+ return cor
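A quick usage sketch for the module above (the paths and the presence of s3fs are illustrative assumptions, not part of this commit):

import fsspec
from fsspec.generic import rsync

# one-shot directory sync across protocols; files are compared on the
# "size" field by default and only copied when missing or different
rsync("file:///tmp/source-tree", "s3://my-bucket/dest-tree")  # illustrative paths

# the same per-protocol dispatching via the filesystem object itself
fs = fsspec.filesystem("generic")
fs.copy("file:///tmp/a.bin", "s3://my-bucket/a.bin")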
.venv/lib/python3.13/site-packages/fsspec/gui.py ADDED
@@ -0,0 +1,417 @@
+ import ast
+ import contextlib
+ import logging
+ import os
+ import re
+ from collections.abc import Sequence
+ from typing import ClassVar
+
+ import panel as pn
+
+ from .core import OpenFile, get_filesystem_class, split_protocol
+ from .registry import known_implementations
+
+ pn.extension()
+ logger = logging.getLogger("fsspec.gui")
+
+
+ class SigSlot:
+     """Signal-slot mixin, for Panel event passing
+
+     Include this class in a widget manager's superclasses to be able to
+     register events and callbacks on Panel widgets managed by that class.
+
+     The method ``_register`` should be called as widgets are added, and external
+     code should call ``connect`` to associate callbacks.
+
+     By default, all signals emit a DEBUG logging statement.
+     """
+
+     # names of signals that this class may emit, each of which must be
+     # set by _register for any new instance
+     signals: ClassVar[Sequence[str]] = []
+     # names of actions that this class may respond to,
+     # each of which must be a method name
+     slots: ClassVar[Sequence[str]] = []
+
+     def __init__(self):
+         self._ignoring_events = False
+         self._sigs = {}
+         self._map = {}
+         self._setup()
+
+     def _setup(self):
+         """Create GUI elements and register signals"""
+         self.panel = pn.pane.PaneBase()
+         # no signals to set up in the base class
+
+     def _register(
+         self, widget, name, thing="value", log_level=logging.DEBUG, auto=False
+     ):
+         """Watch the given attribute of a widget and assign it a named event
+
+         This is normally called at the time a widget is instantiated, in the
+         class which owns it.
+
+         Parameters
+         ----------
+         widget : pn.layout.Panel or None
+             Widget to watch. If None, an anonymous signal not associated with
+             any widget.
+         name : str
+             Name of this event
+         thing : str
+             Attribute of the given widget to watch
+         log_level : int
+             When the signal is triggered, a logging event of the given level
+             will be fired in the fsspec.gui logger.
+         auto : bool
+             If True, automatically connects with a method in this class of the
+             same name.
+         """
+         if name not in self.signals:
+             raise ValueError(f"Attempt to assign an undeclared signal: {name}")
+         self._sigs[name] = {
+             "widget": widget,
+             "callbacks": [],
+             "thing": thing,
+             "log": log_level,
+         }
+         wn = "-".join(
+             [
+                 getattr(widget, "name", str(widget)) if widget is not None else "none",
+                 thing,
+             ]
+         )
+         self._map[wn] = name
+         if widget is not None:
+             widget.param.watch(self._signal, thing, onlychanged=True)
+         if auto and hasattr(self, name):
+             self.connect(name, getattr(self, name))
+
+     def _repr_mimebundle_(self, *args, **kwargs):
+         """Display in a notebook or a server"""
+         try:
+             return self.panel._repr_mimebundle_(*args, **kwargs)
+         except (ValueError, AttributeError) as exc:
+             raise NotImplementedError(
+                 "Panel does not seem to be set up properly"
+             ) from exc
+
+     def connect(self, signal, slot):
+         """Associate callback with given event
+
+         The callback must be a function which takes the "new" value of the
+         watched attribute as the only parameter. If the callback returns False,
+         this cancels any further processing of the given event.
+
+         Alternatively, the callback can be a string, in which case it means
+         emitting the correspondingly-named event (i.e., connect to self)
+         """
+         self._sigs[signal]["callbacks"].append(slot)
+
+     def _signal(self, event):
+         """This is called by an action on a widget
+
+         Within a ``self.ignore_events`` context, nothing happens.
+
+         Tests can execute this method by directly changing the values of
+         widget components.
+         """
+         if not self._ignoring_events:
+             wn = "-".join([event.obj.name, event.name])
+             if wn in self._map and self._map[wn] in self._sigs:
+                 self._emit(self._map[wn], event.new)
+
+     @contextlib.contextmanager
+     def ignore_events(self):
+         """Temporarily turn off events processing in this instance
+
+         (does not propagate to children)
+         """
+         self._ignoring_events = True
+         try:
+             yield
+         finally:
+             self._ignoring_events = False
+
+     def _emit(self, sig, value=None):
+         """An event happened, call its callbacks
+
+         This method can be used in tests to simulate message passing without
+         directly changing visual elements.
+
+         Calling of callbacks will halt whenever one returns False.
+         """
+         logger.log(self._sigs[sig]["log"], f"{sig}: {value}")
+         for callback in self._sigs[sig]["callbacks"]:
+             if isinstance(callback, str):
+                 self._emit(callback)
+             else:
+                 try:
+                     # running callbacks should not break the interface
+                     ret = callback(value)
+                     if ret is False:
+                         break
+                 except Exception as e:
+                     logger.exception(
+                         "Exception (%s) while executing callback for signal: %s",
+                         e,
+                         sig,
+                     )
+
+     def show(self, threads=False):
+         """Open a new browser tab and display this instance's interface"""
+         self.panel.show(threads=threads, verbose=False)
+         return self
+
+
+ class SingleSelect(SigSlot):
+     """A multiselect which only allows you to select one item for an event"""
+
+     signals = ["_selected", "selected"]  # the first is internal
+     slots = ["set_options", "set_selection", "add", "clear", "select"]
+
+     def __init__(self, **kwargs):
+         self.kwargs = kwargs
+         super().__init__()
+
+     def _setup(self):
+         self.panel = pn.widgets.MultiSelect(**self.kwargs)
+         self._register(self.panel, "_selected", "value")
+         self._register(None, "selected")
+         self.connect("_selected", self.select_one)
+
+     def _signal(self, *args, **kwargs):
+         super()._signal(*args, **kwargs)
+
+     def select_one(self, *_):
+         with self.ignore_events():
+             val = [self.panel.value[-1]] if self.panel.value else []
+             self.panel.value = val
+         self._emit("selected", self.panel.value)
+
+     def set_options(self, options):
+         self.panel.options = options
+
+     def clear(self):
+         self.panel.options = []
+
+     @property
+     def value(self):
+         return self.panel.value
+
+     def set_selection(self, selection):
+         self.panel.value = [selection]
+
+
+ class FileSelector(SigSlot):
+     """Panel-based graphical file selector widget
+
+     Instances of this widget are interactive and can be displayed in jupyter by having
+     them as the output of a cell, or in a separate browser tab using ``.show()``.
+     """
+
+     signals = [
+         "protocol_changed",
+         "selection_changed",
+         "directory_entered",
+         "home_clicked",
+         "up_clicked",
+         "go_clicked",
+         "filters_changed",
+     ]
+     slots = ["set_filters", "go_home"]
+
+     def __init__(self, url=None, filters=None, ignore=None, kwargs=None):
+         """
+
+         Parameters
+         ----------
+         url : str (optional)
+             Initial value of the URL to populate the dialog; should include protocol
+         filters : list(str) (optional)
+             File endings to include in the listings. If not included, all files are
+             allowed. Does not affect directories.
+             If given, the endings will appear as checkboxes in the interface
+         ignore : list(str) (optional)
+             Regex(s) of file basename patterns to ignore, e.g., "\\." for typical
+             hidden files on posix
+         kwargs : dict (optional)
+             To pass to file system instance
+         """
+         if url:
+             self.init_protocol, url = split_protocol(url)
+         else:
+             self.init_protocol, url = "file", os.getcwd()
+         self.init_url = url
+         self.init_kwargs = (kwargs if isinstance(kwargs, str) else str(kwargs)) or "{}"
+         self.filters = filters
+         self.ignore = [re.compile(i) for i in ignore or []]
+         self._fs = None
+         super().__init__()
+
+     def _setup(self):
+         self.url = pn.widgets.TextInput(
+             name="url",
+             value=self.init_url,
+             align="end",
+             sizing_mode="stretch_width",
+             width_policy="max",
+         )
+         self.protocol = pn.widgets.Select(
+             options=sorted(known_implementations),
+             value=self.init_protocol,
+             name="protocol",
+             align="center",
+         )
+         self.kwargs = pn.widgets.TextInput(
+             name="kwargs", value=self.init_kwargs, align="center"
+         )
+         self.go = pn.widgets.Button(name="⇨", align="end", width=45)
+         self.main = SingleSelect(size=10)
+         self.home = pn.widgets.Button(name="🏠", width=40, height=30, align="end")
+         self.up = pn.widgets.Button(name="‹", width=30, height=30, align="end")
+
+         self._register(self.protocol, "protocol_changed", auto=True)
+         self._register(self.go, "go_clicked", "clicks", auto=True)
+         self._register(self.up, "up_clicked", "clicks", auto=True)
+         self._register(self.home, "home_clicked", "clicks", auto=True)
+         self._register(None, "selection_changed")
+         self.main.connect("selected", self.selection_changed)
+         self._register(None, "directory_entered")
+         self.prev_protocol = self.protocol.value
+         self.prev_kwargs = self.storage_options
+
+         self.filter_sel = pn.widgets.CheckBoxGroup(
+             value=[], options=[], inline=False, align="end", width_policy="min"
+         )
+         self._register(self.filter_sel, "filters_changed", auto=True)
+
+         self.panel = pn.Column(
+             pn.Row(self.protocol, self.kwargs),
+             pn.Row(self.home, self.up, self.url, self.go, self.filter_sel),
+             self.main.panel,
+         )
+         self.set_filters(self.filters)
+         self.go_clicked()
+
+     def set_filters(self, filters=None):
+         self.filters = filters
+         if filters:
+             self.filter_sel.options = filters
+             self.filter_sel.value = filters
+         else:
+             self.filter_sel.options = []
+             self.filter_sel.value = []
+
+     @property
+     def storage_options(self):
+         """Value of the kwargs box as a dictionary"""
+         return ast.literal_eval(self.kwargs.value) or {}
+
+     @property
+     def fs(self):
+         """Current filesystem instance"""
+         if self._fs is None:
+             cls = get_filesystem_class(self.protocol.value)
+             self._fs = cls(**self.storage_options)
+         return self._fs
+
+     @property
+     def urlpath(self):
+         """URL of currently selected item"""
+         return (
+             (f"{self.protocol.value}://{self.main.value[0]}")
+             if self.main.value
+             else None
+         )
+
+     def open_file(self, mode="rb", compression=None, encoding=None):
+         """Create OpenFile instance for the currently selected item
+
+         For example, in a notebook you might do something like
+
+         .. code-block::
+
+             [ ]: sel = FileSelector(); sel
+
+             # user selects their file
+
+             [ ]: with sel.open_file('rb') as f:
+             ... out = f.read()
+
+         Parameters
+         ----------
+         mode: str (optional)
+             Open mode for the file.
+         compression: str (optional)
+             If given, interact with the file as compressed. Set to 'infer' to
+             guess compression from the file ending
+         encoding: str (optional)
+             If using text mode, use this encoding; defaults to UTF8.
+         """
+         if self.urlpath is None:
+             raise ValueError("No file selected")
+         return OpenFile(self.fs, self.urlpath, mode, compression, encoding)
+
+     def filters_changed(self, values):
+         self.filters = values
+         self.go_clicked()
+
+     def selection_changed(self, *_):
+         if self.urlpath is None:
+             return
+         if self.fs.isdir(self.urlpath):
+             self.url.value = self.fs._strip_protocol(self.urlpath)
+             self.go_clicked()
+
+     def go_clicked(self, *_):
+         if (
+             self.prev_protocol != self.protocol.value
+             or self.prev_kwargs != self.storage_options
+         ):
+             self._fs = None  # causes fs to be recreated
+             self.prev_protocol = self.protocol.value
+             self.prev_kwargs = self.storage_options
+         listing = sorted(
+             self.fs.ls(self.url.value, detail=True), key=lambda x: x["name"]
+         )
+         listing = [
+             l
+             for l in listing
+             if not any(i.match(l["name"].rsplit("/", 1)[-1]) for i in self.ignore)
+         ]
+         folders = {
+             "📁 " + o["name"].rsplit("/", 1)[-1]: o["name"]
+             for o in listing
+             if o["type"] == "directory"
+         }
+         files = {
+             "📄 " + o["name"].rsplit("/", 1)[-1]: o["name"]
+             for o in listing
+             if o["type"] == "file"
+         }
+         if self.filters:
+             files = {
+                 k: v
+                 for k, v in files.items()
+                 if any(v.endswith(ext) for ext in self.filters)
+             }
+         self.main.set_options(dict(**folders, **files))
+
+     def protocol_changed(self, *_):
+         self._fs = None
+         self.main.options = []
+         self.url.value = ""
+
+     def home_clicked(self, *_):
+         self.protocol.value = self.init_protocol
+         self.kwargs.value = self.init_kwargs
+         self.url.value = self.init_url
+         self.go_clicked()
+
+     def up_clicked(self, *_):
+         self.url.value = self.fs._parent(self.url.value)
+         self.go_clicked()
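A minimal interactive sketch for the selector above (assumes panel is installed and a notebook or browser session is available):

import os
from fsspec.gui import FileSelector

sel = FileSelector("file://" + os.getcwd(), filters=[".csv", ".parquet"])
sel.show()  # or leave `sel` as the last expression of a notebook cell

# ...after a file has been picked in the widget...
with sel.open_file("rb") as f:  # OpenFile for the current selection
    header = f.read(64)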
.venv/lib/python3.13/site-packages/fsspec/mapping.py ADDED
@@ -0,0 +1,251 @@
+ import array
+ import logging
+ import posixpath
+ import warnings
+ from collections.abc import MutableMapping
+ from functools import cached_property
+
+ from fsspec.core import url_to_fs
+
+ logger = logging.getLogger("fsspec.mapping")
+
+
+ class FSMap(MutableMapping):
+     """Wrap a FileSystem instance as a mutable mapping.
+
+     The keys of the mapping become files under the given root, and the
+     values (which must be bytes) the contents of those files.
+
+     Parameters
+     ----------
+     root: string
+         prefix for all the files
+     fs: FileSystem instance
+     check: bool (=False)
+         performs a touch at the location, to check for write access.
+
+     Examples
+     --------
+     >>> fs = FileSystem(**parameters)  # doctest: +SKIP
+     >>> d = FSMap('my-data/path/', fs)  # doctest: +SKIP
+     or, more likely
+     >>> d = fs.get_mapper('my-data/path/')
+
+     >>> d['loc1'] = b'Hello World'  # doctest: +SKIP
+     >>> list(d.keys())  # doctest: +SKIP
+     ['loc1']
+     >>> d['loc1']  # doctest: +SKIP
+     b'Hello World'
+     """
+
+     def __init__(self, root, fs, check=False, create=False, missing_exceptions=None):
+         self.fs = fs
+         self.root = fs._strip_protocol(root)
+         self._root_key_to_str = fs._strip_protocol(posixpath.join(root, "x"))[:-1]
+         if missing_exceptions is None:
+             missing_exceptions = (
+                 FileNotFoundError,
+                 IsADirectoryError,
+                 NotADirectoryError,
+             )
+         self.missing_exceptions = missing_exceptions
+         self.check = check
+         self.create = create
+         if create:
+             if not self.fs.exists(root):
+                 self.fs.mkdir(root)
+         if check:
+             if not self.fs.exists(root):
+                 raise ValueError(
+                     f"Path {root} does not exist. Create "
+                     f"with the ``create=True`` keyword"
+                 )
+             self.fs.touch(root + "/a")
+             self.fs.rm(root + "/a")
+
+     @cached_property
+     def dirfs(self):
+         """dirfs instance that can be used with the same keys as the mapper"""
+         from .implementations.dirfs import DirFileSystem
+
+         return DirFileSystem(path=self._root_key_to_str, fs=self.fs)
+
+     def clear(self):
+         """Remove all keys below root - empties out mapping"""
+         logger.info("Clear mapping at %s", self.root)
+         try:
+             self.fs.rm(self.root, True)
+             self.fs.mkdir(self.root)
+         except:  # noqa: E722
+             pass
+
+     def getitems(self, keys, on_error="raise"):
+         """Fetch multiple items from the store
+
+         If the backend is async-able, this might proceed concurrently
+
+         Parameters
+         ----------
+         keys: list(str)
+             The keys to be fetched
+         on_error : "raise", "omit", "return"
+             If raise, an underlying exception will be raised (converted to KeyError
+             if the type is in self.missing_exceptions); if omit, keys with exception
+             will simply not be included in the output; if "return", all keys are
+             included in the output, but the value will be bytes or an exception
+             instance.
+
+         Returns
+         -------
+         dict(key, bytes|exception)
+         """
+         keys2 = [self._key_to_str(k) for k in keys]
+         oe = on_error if on_error == "raise" else "return"
+         try:
+             out = self.fs.cat(keys2, on_error=oe)
+             if isinstance(out, bytes):
+                 out = {keys2[0]: out}
+         except self.missing_exceptions as e:
+             raise KeyError from e
+         out = {
+             k: (KeyError() if isinstance(v, self.missing_exceptions) else v)
+             for k, v in out.items()
+         }
+         return {
+             key: out[k2] if on_error == "raise" else out.get(k2, KeyError(k2))
+             for key, k2 in zip(keys, keys2)
+             if on_error == "return" or not isinstance(out[k2], BaseException)
+         }
+
+     def setitems(self, values_dict):
+         """Set the values of multiple items in the store
+
+         Parameters
+         ----------
+         values_dict: dict(str, bytes)
+         """
+         values = {self._key_to_str(k): maybe_convert(v) for k, v in values_dict.items()}
+         self.fs.pipe(values)
+
+     def delitems(self, keys):
+         """Remove multiple keys from the store"""
+         self.fs.rm([self._key_to_str(k) for k in keys])
+
+     def _key_to_str(self, key):
+         """Generate full path for the key"""
+         if not isinstance(key, str):
+             # raise TypeError("key must be of type `str`, got `{type(key).__name__}`"
+             warnings.warn(
+                 "from fsspec 2023.5 onward FSMap non-str keys will raise TypeError",
+                 DeprecationWarning,
+             )
+             if isinstance(key, list):
+                 key = tuple(key)
+             key = str(key)
+         return f"{self._root_key_to_str}{key}".rstrip("/")
+
+     def _str_to_key(self, s):
+         """Strip the path prefix off to leave the key name"""
+         return s[len(self.root) :].lstrip("/")
+
+     def __getitem__(self, key, default=None):
+         """Retrieve data"""
+         k = self._key_to_str(key)
+         try:
+             result = self.fs.cat(k)
+         except self.missing_exceptions as exc:
+             if default is not None:
+                 return default
+             raise KeyError(key) from exc
+         return result
+
+     def pop(self, key, default=None):
+         """Pop data"""
+         result = self.__getitem__(key, default)
+         try:
+             del self[key]
+         except KeyError:
+             pass
+         return result
+
+     def __setitem__(self, key, value):
+         """Store value in key"""
+         key = self._key_to_str(key)
+         self.fs.mkdirs(self.fs._parent(key), exist_ok=True)
+         self.fs.pipe_file(key, maybe_convert(value))
+
+     def __iter__(self):
+         return (self._str_to_key(x) for x in self.fs.find(self.root))
+
+     def __len__(self):
+         return len(self.fs.find(self.root))
+
+     def __delitem__(self, key):
+         """Remove key"""
+         try:
+             self.fs.rm(self._key_to_str(key))
+         except Exception as exc:
+             raise KeyError from exc
+
+     def __contains__(self, key):
+         """Does key exist in mapping?"""
+         path = self._key_to_str(key)
+         return self.fs.isfile(path)
+
+     def __reduce__(self):
+         return FSMap, (self.root, self.fs, False, False, self.missing_exceptions)
+
+
+ def maybe_convert(value):
+     if isinstance(value, array.array) or hasattr(value, "__array__"):
+         # bytes-like things
+         if hasattr(value, "dtype") and value.dtype.kind in "Mm":
+             # The buffer interface doesn't support datetime64/timedelta64 numpy
+             # arrays
+             value = value.view("int64")
+         value = bytes(memoryview(value))
+     return value
+
+
+ def get_mapper(
+     url="",
+     check=False,
+     create=False,
+     missing_exceptions=None,
+     alternate_root=None,
+     **kwargs,
+ ):
+     """Create key-value interface for given URL and options
+
+     The URL will be of the form "protocol://location" and point to the root
+     of the mapper required. All keys will be file-names below this location,
+     and their values the contents of each key.
+
+     Also accepts compound URLs like zip::s3://bucket/file.zip , see ``fsspec.open``.
+
+     Parameters
+     ----------
+     url: str
+         Root URL of mapping
+     check: bool
+         Whether to attempt to read from the location before instantiation, to
+         check that the mapping does exist
+     create: bool
+         Whether to make the directory corresponding to the root before
+         instantiating
+     missing_exceptions: None or tuple
+         If given, these exception types will be regarded as missing keys and
+         return KeyError when trying to read data. By default, you get
+         (FileNotFoundError, IsADirectoryError, NotADirectoryError)
+     alternate_root: None or str
+         In cases of complex URLs, the parser may fail to pick the correct part
+         for the mapper root, so this arg can override
+
+     Returns
+     -------
+     ``FSMap`` instance, the dict-like key-value store.
+     """
+     # Removing protocol here - could defer to each open() on the backend
+     fs, urlpath = url_to_fs(url, **kwargs)
+     root = alternate_root if alternate_root is not None else urlpath
+     return FSMap(root, fs, check, create, missing_exceptions=missing_exceptions)
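A short sketch of the mapper in action, using the in-memory filesystem so it runs anywhere:

import fsspec

m = fsspec.get_mapper("memory://demo")
m["a/b"] = b"hello"  # writes memory://demo/a/b
assert list(m) == ["a/b"]
assert m["a/b"] == b"hello"
del m["a/b"]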
.venv/lib/python3.13/site-packages/fsspec/parquet.py ADDED
@@ -0,0 +1,541 @@
+ import io
+ import json
+ import warnings
+
+ from .core import url_to_fs
+ from .utils import merge_offset_ranges
+
+ # Parquet-Specific Utilities for fsspec
+ #
+ # Most of the functions defined in this module are NOT
+ # intended for public consumption. The only exception
+ # to this is `open_parquet_file`, which should be used
+ # in place of `fs.open()` to open parquet-formatted files
+ # on remote file systems.
+
+
+ def open_parquet_file(
+     path,
+     mode="rb",
+     fs=None,
+     metadata=None,
+     columns=None,
+     row_groups=None,
+     storage_options=None,
+     strict=False,
+     engine="auto",
+     max_gap=64_000,
+     max_block=256_000_000,
+     footer_sample_size=1_000_000,
+     **kwargs,
+ ):
+     """
+     Return a file-like object for a single Parquet file.
+
+     The specified parquet `engine` will be used to parse the
+     footer metadata, and determine the required byte ranges
+     from the file. The target path will then be opened with
+     the "parts" (`KnownPartsOfAFile`) caching strategy.
+
+     Note that this method is intended for usage with remote
+     file systems, and is unlikely to improve parquet-read
+     performance on local file systems.
+
+     Parameters
+     ----------
+     path: str
+         Target file path.
+     mode: str, optional
+         Mode option to be passed through to `fs.open`. Default is "rb".
+     metadata: Any, optional
+         Parquet metadata object. Object type must be supported
+         by the backend parquet engine. For now, only the "fastparquet"
+         engine supports an explicit `ParquetFile` metadata object.
+         If a metadata object is supplied, the remote footer metadata
+         will not need to be transferred into local memory.
+     fs: AbstractFileSystem, optional
+         Filesystem object to use for opening the file. If nothing is
+         specified, an `AbstractFileSystem` object will be inferred.
+     engine : str, default "auto"
+         Parquet engine to use for metadata parsing. Allowed options
+         include "fastparquet", "pyarrow", and "auto". The specified
+         engine must be installed in the current environment. If
+         "auto" is specified, and both engines are installed,
+         "fastparquet" will take precedence over "pyarrow".
+     columns: list, optional
+         List of all column names that may be read from the file.
+     row_groups : list, optional
+         List of all row-groups that may be read from the file. This
+         may be a list of row-group indices (integers), or it may be
+         a list of `RowGroup` metadata objects (if the "fastparquet"
+         engine is used).
+     storage_options : dict, optional
+         Used to generate an `AbstractFileSystem` object if `fs` was
+         not specified.
+     strict : bool, optional
+         Whether the resulting `KnownPartsOfAFile` cache should
+         fetch reads that go beyond a known byte-range boundary.
+         If `False` (the default), any read that ends outside a
+         known part will be zero padded. Note that using
+         `strict=True` may be useful for debugging.
+     max_gap : int, optional
+         Neighboring byte ranges will only be merged when their
+         inter-range gap is <= `max_gap`. Default is 64KB.
+     max_block : int, optional
+         Neighboring byte ranges will only be merged when the size of
+         the aggregated range is <= `max_block`. Default is 256MB.
+     footer_sample_size : int, optional
+         Number of bytes to read from the end of the path to look
+         for the footer metadata. If the sampled bytes do not contain
+         the footer, a second read request will be required, and
+         performance will suffer. Default is 1MB.
+     **kwargs :
+         Optional key-word arguments to pass to `fs.open`
+     """
+
+     # Make sure we have an `AbstractFileSystem` object
+     # to work with
+     if fs is None:
+         fs = url_to_fs(path, **(storage_options or {}))[0]
+
+     # For now, `columns == []` not supported. Just use
+     # default `open` command with `path` input
+     if columns is not None and len(columns) == 0:
+         return fs.open(path, mode=mode)
+
+     # Set the engine
+     engine = _set_engine(engine)
+
+     # Fetch the known byte ranges needed to read
+     # `columns` and/or `row_groups`
+     data = _get_parquet_byte_ranges(
+         [path],
+         fs,
+         metadata=metadata,
+         columns=columns,
+         row_groups=row_groups,
+         engine=engine,
+         max_gap=max_gap,
+         max_block=max_block,
+         footer_sample_size=footer_sample_size,
+     )
+
+     # Extract file name from `data`
+     fn = next(iter(data)) if data else path
+
+     # Call self.open with "parts" caching
+     options = kwargs.pop("cache_options", {}).copy()
+     return fs.open(
+         fn,
+         mode=mode,
+         cache_type="parts",
+         cache_options={
+             **options,
+             "data": data.get(fn, {}),
+             "strict": strict,
+         },
+         **kwargs,
+     )
+
+
+ def _get_parquet_byte_ranges(
+     paths,
+     fs,
+     metadata=None,
+     columns=None,
+     row_groups=None,
+     max_gap=64_000,
+     max_block=256_000_000,
+     footer_sample_size=1_000_000,
+     engine="auto",
+ ):
+     """Get a dictionary of the known byte ranges needed
+     to read a specific column/row-group selection from a
+     Parquet dataset. Each value in the output dictionary
+     is intended for use as the `data` argument for the
+     `KnownPartsOfAFile` caching strategy of a single path.
+     """
+
+     # Set engine if necessary
+     if isinstance(engine, str):
+         engine = _set_engine(engine)
+
+     # Pass to specialized function if metadata is defined
+     if metadata is not None:
+         # Use the provided parquet metadata object
+         # to avoid transferring/parsing footer metadata
+         return _get_parquet_byte_ranges_from_metadata(
+             metadata,
+             fs,
+             engine,
+             columns=columns,
+             row_groups=row_groups,
+             max_gap=max_gap,
+             max_block=max_block,
+         )
+
+     # Get file sizes asynchronously
+     file_sizes = fs.sizes(paths)
+
+     # Populate global paths, starts, & ends
+     result = {}
+     data_paths = []
+     data_starts = []
+     data_ends = []
+     add_header_magic = True
+     if columns is None and row_groups is None:
+         # We are NOT selecting specific columns or row-groups.
+         #
+         # We can avoid sampling the footers, and just transfer
+         # all file data with cat_ranges
+         for i, path in enumerate(paths):
+             result[path] = {}
+             for b in range(0, file_sizes[i], max_block):
+                 data_paths.append(path)
+                 data_starts.append(b)
+                 data_ends.append(min(b + max_block, file_sizes[i]))
+         add_header_magic = False  # "Magic" should already be included
+     else:
+         # We ARE selecting specific columns or row-groups.
+         #
+         # Gather file footers.
+         # We just take the last `footer_sample_size` bytes of each
+         # file (or the entire file if it is smaller than that)
+         footer_starts = []
+         footer_ends = []
+         for i, path in enumerate(paths):
+             footer_ends.append(file_sizes[i])
+             sample_size = max(0, file_sizes[i] - footer_sample_size)
+             footer_starts.append(sample_size)
+         footer_samples = fs.cat_ranges(paths, footer_starts, footer_ends)
+
+         # Check our footer samples and re-sample if necessary.
+         missing_footer_starts = footer_starts.copy()
+         large_footer = 0
+         for i, path in enumerate(paths):
+             footer_size = int.from_bytes(footer_samples[i][-8:-4], "little")
+             real_footer_start = file_sizes[i] - (footer_size + 8)
+             if real_footer_start < footer_starts[i]:
+                 missing_footer_starts[i] = real_footer_start
+                 large_footer = max(large_footer, (footer_size + 8))
+         if large_footer:
+             warnings.warn(
+                 f"Not enough data was used to sample the parquet footer. "
+                 f"Try setting footer_sample_size >= {large_footer}."
+             )
+             for i, block in enumerate(
+                 fs.cat_ranges(
+                     paths,
+                     missing_footer_starts,
+                     footer_starts,
+                 )
+             ):
+                 footer_samples[i] = block + footer_samples[i]
+                 footer_starts[i] = missing_footer_starts[i]
+
+         # Calculate required byte ranges for each path
+         for i, path in enumerate(paths):
+             # Deal with small-file case.
+             # Just include all remaining bytes of the file
+             # in a single range.
+             if file_sizes[i] < max_block:
+                 if footer_starts[i] > 0:
+                     # Only need to transfer the data if the
+                     # footer sample isn't already the whole file
+                     data_paths.append(path)
+                     data_starts.append(0)
+                     data_ends.append(footer_starts[i])
+                 continue
+
+             # Use "engine" to collect data byte ranges
+             path_data_starts, path_data_ends = engine._parquet_byte_ranges(
+                 columns,
+                 row_groups=row_groups,
+                 footer=footer_samples[i],
+                 footer_start=footer_starts[i],
+             )
+
+             data_paths += [path] * len(path_data_starts)
+             data_starts += path_data_starts
+             data_ends += path_data_ends
+
+         # Merge adjacent offset ranges
+         data_paths, data_starts, data_ends = merge_offset_ranges(
+             data_paths,
+             data_starts,
+             data_ends,
+             max_gap=max_gap,
+             max_block=max_block,
+             sort=False,  # Should already be sorted
+         )
+
+         # Start by populating `result` with footer samples
+         for i, path in enumerate(paths):
+             result[path] = {(footer_starts[i], footer_ends[i]): footer_samples[i]}
+
+     # Transfer the data byte-ranges into local memory
+     _transfer_ranges(fs, result, data_paths, data_starts, data_ends)
+
+     # Add b"PAR1" to header if necessary
+     if add_header_magic:
+         _add_header_magic(result)
+
+     return result
+
+
+ def _get_parquet_byte_ranges_from_metadata(
+     metadata,
+     fs,
+     engine,
+     columns=None,
+     row_groups=None,
+     max_gap=64_000,
+     max_block=256_000_000,
+ ):
+     """Simplified version of `_get_parquet_byte_ranges` for
+     the case that an engine-specific `metadata` object is
+     provided, and the remote footer metadata does not need to
+     be transferred before calculating the required byte ranges.
+     """
+
+     # Use "engine" to collect data byte ranges
+     data_paths, data_starts, data_ends = engine._parquet_byte_ranges(
+         columns,
+         row_groups=row_groups,
+         metadata=metadata,
+     )
+
+     # Merge adjacent offset ranges
+     data_paths, data_starts, data_ends = merge_offset_ranges(
+         data_paths,
+         data_starts,
+         data_ends,
+         max_gap=max_gap,
+         max_block=max_block,
+         sort=False,  # Should be sorted
+     )
+
+     # Transfer the data byte-ranges into local memory
+     result = {fn: {} for fn in list(set(data_paths))}
+     _transfer_ranges(fs, result, data_paths, data_starts, data_ends)
+
+     # Add b"PAR1" to header
+     _add_header_magic(result)
+
+     return result
+
+
+ def _transfer_ranges(fs, blocks, paths, starts, ends):
+     # Use cat_ranges to gather the data byte_ranges
+     ranges = (paths, starts, ends)
+     for path, start, stop, data in zip(*ranges, fs.cat_ranges(*ranges)):
+         blocks[path][(start, stop)] = data
+
+
+ def _add_header_magic(data):
+     # Add b"PAR1" to file headers
+     for path in list(data.keys()):
+         add_magic = True
+         for k in data[path]:
+             if k[0] == 0 and k[1] >= 4:
+                 add_magic = False
+                 break
+         if add_magic:
+             data[path][(0, 4)] = b"PAR1"
+
+
+ def _set_engine(engine_str):
+     # Define a list of parquet engines to try
+     if engine_str == "auto":
+         try_engines = ("fastparquet", "pyarrow")
+     elif not isinstance(engine_str, str):
+         raise ValueError(
+             "Failed to set parquet engine! "
+             "Please pass 'fastparquet', 'pyarrow', or 'auto'"
+         )
+     elif engine_str not in ("fastparquet", "pyarrow"):
+         raise ValueError(f"{engine_str} engine not supported by `fsspec.parquet`")
+     else:
+         try_engines = [engine_str]
+
+     # Try importing the engines in `try_engines`,
+     # and choose the first one that succeeds
+     for engine in try_engines:
+         try:
+             if engine == "fastparquet":
+                 return FastparquetEngine()
+             elif engine == "pyarrow":
+                 return PyarrowEngine()
+         except ImportError:
+             pass
+
+     # Raise an error if a supported parquet engine
+     # was not found
+     raise ImportError(
+         f"The following parquet engines are not installed "
+         f"in your python environment: {try_engines}. "
+         f"Please install 'fastparquet' or 'pyarrow' to "
+         f"utilize the `fsspec.parquet` module."
+     )
+
+
+ class FastparquetEngine:
+     # The purpose of the FastparquetEngine class is
+     # to check if fastparquet can be imported (on initialization)
+     # and to define a `_parquet_byte_ranges` method. In the
+     # future, this class may also be used to define other
+     # methods/logic that are specific to fastparquet.
+
+     def __init__(self):
+         import fastparquet as fp
+
+         self.fp = fp
+
+     def _row_group_filename(self, row_group, pf):
+         return pf.row_group_filename(row_group)
+
+     def _parquet_byte_ranges(
+         self,
+         columns,
+         row_groups=None,
+         metadata=None,
+         footer=None,
+         footer_start=None,
+     ):
+         # Initialize offset ranges and define ParquetFile metadata
+         pf = metadata
+         data_paths, data_starts, data_ends = [], [], []
+         if pf is None:
+             pf = self.fp.ParquetFile(io.BytesIO(footer))
+
+         # Convert columns to a set and add any index columns
+         # specified in the pandas metadata (just in case)
+         column_set = None if columns is None else set(columns)
+         if column_set is not None and hasattr(pf, "pandas_metadata"):
+             md_index = [
+                 ind
+                 for ind in pf.pandas_metadata.get("index_columns", [])
+                 # Ignore RangeIndex information
+                 if not isinstance(ind, dict)
+             ]
+             column_set |= set(md_index)
+
+         # Check if row_groups is a list of integers
+         # or a list of row-group metadata
+         if row_groups and not isinstance(row_groups[0], int):
+             # Input row_groups contains row-group metadata
+             row_group_indices = None
+         else:
+             # Input row_groups contains row-group indices
+             row_group_indices = row_groups
+             row_groups = pf.row_groups
+
+         # Loop through column chunks to add required byte ranges
+         for r, row_group in enumerate(row_groups):
+             # Skip this row-group if we are targeting
+             # specific row-groups
+             if row_group_indices is None or r in row_group_indices:
+                 # Find the target parquet-file path for `row_group`
+                 fn = self._row_group_filename(row_group, pf)
+
+                 for column in row_group.columns:
+                     name = column.meta_data.path_in_schema[0]
+                     # Skip this column if we are targeting
+                     # specific columns
+                     if column_set is None or name in column_set:
+                         file_offset0 = column.meta_data.dictionary_page_offset
+                         if file_offset0 is None:
+                             file_offset0 = column.meta_data.data_page_offset
+                         num_bytes = column.meta_data.total_compressed_size
+                         if footer_start is None or file_offset0 < footer_start:
+                             data_paths.append(fn)
+                             data_starts.append(file_offset0)
+                             data_ends.append(
+                                 min(
+                                     file_offset0 + num_bytes,
+                                     footer_start or (file_offset0 + num_bytes),
+                                 )
+                             )
+
+         if metadata:
+             # The metadata in this call may map to multiple
+             # file paths. Need to include `data_paths`
+             return data_paths, data_starts, data_ends
+         return data_starts, data_ends
+
+
+ class PyarrowEngine:
+     # The purpose of the PyarrowEngine class is
+     # to check if pyarrow can be imported (on initialization)
+     # and to define a `_parquet_byte_ranges` method. In the
+     # future, this class may also be used to define other
+     # methods/logic that are specific to pyarrow.
+
+     def __init__(self):
+         import pyarrow.parquet as pq
+
+         self.pq = pq
+
+     def _row_group_filename(self, row_group, metadata):
+         raise NotImplementedError
+
+     def _parquet_byte_ranges(
+         self,
+         columns,
+         row_groups=None,
+         metadata=None,
+         footer=None,
+         footer_start=None,
+     ):
+         if metadata is not None:
+             raise ValueError("metadata input not supported for PyarrowEngine")
+
+         data_starts, data_ends = [], []
+         md = self.pq.ParquetFile(io.BytesIO(footer)).metadata
+
+         # Convert columns to a set and add any index columns
+         # specified in the pandas metadata (just in case)
+         column_set = None if columns is None else set(columns)
+         if column_set is not None:
+             schema = md.schema.to_arrow_schema()
+             has_pandas_metadata = (
+                 schema.metadata is not None and b"pandas" in schema.metadata
+             )
+             if has_pandas_metadata:
+                 md_index = [
+                     ind
+                     for ind in json.loads(
+                         schema.metadata[b"pandas"].decode("utf8")
+                     ).get("index_columns", [])
+                     # Ignore RangeIndex information
+                     if not isinstance(ind, dict)
+                 ]
+                 column_set |= set(md_index)
+
+         # Loop through column chunks to add required byte ranges
+         for r in range(md.num_row_groups):
+             # Skip this row-group if we are targeting
+             # specific row-groups
+             if row_groups is None or r in row_groups:
+                 row_group = md.row_group(r)
+                 for c in range(row_group.num_columns):
+                     column = row_group.column(c)
+                     name = column.path_in_schema
+                     # Skip this column if we are targeting
+                     # specific columns
+                     split_name = name.split(".")[0]
+                     if (
+                         column_set is None
+                         or name in column_set
+                         or split_name in column_set
+                     ):
+                         file_offset0 = column.dictionary_page_offset
+                         if file_offset0 is None:
+                             file_offset0 = column.data_page_offset
+                         num_bytes = column.total_compressed_size
+                         if file_offset0 < footer_start:
+                             data_starts.append(file_offset0)
+                             data_ends.append(
+                                 min(file_offset0 + num_bytes, footer_start)
+                             )
+         return data_starts, data_ends
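A usage sketch for open_parquet_file (the bucket path is illustrative, and s3fs plus one of the parquet engines are assumed to be installed):

import pandas as pd
from fsspec.parquet import open_parquet_file

# only the footer plus the byte ranges for column "x" are transferred
with open_parquet_file("s3://my-bucket/data.parquet", columns=["x"]) as f:
    df = pd.read_parquet(f, columns=["x"])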
.venv/lib/python3.13/site-packages/fsspec/registry.py ADDED
@@ -0,0 +1,330 @@
+ from __future__ import annotations
+
+ import importlib
+ import types
+ import warnings
+
+ __all__ = ["registry", "get_filesystem_class", "default"]
+
+ # internal, mutable
+ _registry: dict[str, type] = {}
+
+ # external, immutable
+ registry = types.MappingProxyType(_registry)
+ default = "file"
+
+
+ def register_implementation(name, cls, clobber=False, errtxt=None):
+     """Add implementation class to the registry
+
+     Parameters
+     ----------
+     name: str
+         Protocol name to associate with the class
+     cls: class or str
+         if a class: fsspec-compliant implementation class (normally inherits from
+         ``fsspec.AbstractFileSystem``), gets added straight to the registry. If a
+         str, the full path to an implementation class like package.module.class,
+         which gets added to known_implementations,
+         so the import is deferred until the filesystem is actually used.
+     clobber: bool (optional)
+         Whether to overwrite a protocol with the same name; if False, will raise
+         instead.
+     errtxt: str (optional)
+         If given, a failure to import the implementation class will show this
+         text as the error message.
+     """
+     if isinstance(cls, str):
+         if name in known_implementations and clobber is False:
+             if cls != known_implementations[name]["class"]:
+                 raise ValueError(
+                     f"Name ({name}) already in the known_implementations and clobber "
+                     f"is False"
+                 )
+         else:
+             known_implementations[name] = {
+                 "class": cls,
+                 "err": errtxt or f"{cls} import failed for protocol {name}",
+             }
+
+     else:
+         if name in registry and clobber is False:
+             if _registry[name] is not cls:
+                 raise ValueError(
+                     f"Name ({name}) already in the registry and clobber is False"
+                 )
+         else:
+             _registry[name] = cls
+
+
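+ # Illustrative only (hypothetical names): a third-party backend can be
+ # registered lazily by string path, deferring the import until the
+ # protocol is first used:
+ #
+ #     register_implementation(
+ #         "myproto",                          # protocol to expose
+ #         "mypackage.fs.MyProtoFileSystem",   # hypothetical class path
+ #         errtxt="Install mypackage to use myproto://",
+ #     )
+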
60
+ # protocols mapped to the class which implements them. This dict can be
61
+ # updated with register_implementation
62
+ known_implementations = {
63
+ "abfs": {
64
+ "class": "adlfs.AzureBlobFileSystem",
65
+ "err": "Install adlfs to access Azure Datalake Gen2 and Azure Blob Storage",
66
+ },
67
+ "adl": {
68
+ "class": "adlfs.AzureDatalakeFileSystem",
69
+ "err": "Install adlfs to access Azure Datalake Gen1",
70
+ },
71
+ "arrow_hdfs": {
72
+ "class": "fsspec.implementations.arrow.HadoopFileSystem",
73
+ "err": "pyarrow and local java libraries required for HDFS",
74
+ },
75
+ "asynclocal": {
76
+ "class": "morefs.asyn_local.AsyncLocalFileSystem",
77
+ "err": "Install 'morefs[asynclocalfs]' to use AsyncLocalFileSystem",
78
+ },
79
+ "asyncwrapper": {
80
+ "class": "fsspec.implementations.asyn_wrapper.AsyncFileSystemWrapper",
81
+ },
82
+ "az": {
83
+ "class": "adlfs.AzureBlobFileSystem",
84
+ "err": "Install adlfs to access Azure Datalake Gen2 and Azure Blob Storage",
85
+ },
86
+ "blockcache": {"class": "fsspec.implementations.cached.CachingFileSystem"},
87
+ "box": {
88
+ "class": "boxfs.BoxFileSystem",
89
+ "err": "Please install boxfs to access BoxFileSystem",
90
+ },
91
+ "cached": {"class": "fsspec.implementations.cached.CachingFileSystem"},
92
+ "dask": {
93
+ "class": "fsspec.implementations.dask.DaskWorkerFileSystem",
94
+ "err": "Install dask distributed to access worker file system",
95
+ },
96
+ "data": {"class": "fsspec.implementations.data.DataFileSystem"},
97
+ "dbfs": {
98
+ "class": "fsspec.implementations.dbfs.DatabricksFileSystem",
99
+ "err": "Install the requests package to use the DatabricksFileSystem",
100
+ },
101
+ "dir": {"class": "fsspec.implementations.dirfs.DirFileSystem"},
102
+ "dropbox": {
103
+ "class": "dropboxdrivefs.DropboxDriveFileSystem",
104
+ "err": (
105
+ 'DropboxFileSystem requires "dropboxdrivefs","requests" and "'
106
+ '"dropbox" to be installed'
107
+ ),
108
+ },
109
+ "dvc": {
110
+ "class": "dvc.api.DVCFileSystem",
111
+ "err": "Install dvc to access DVCFileSystem",
112
+ },
113
+ "file": {"class": "fsspec.implementations.local.LocalFileSystem"},
114
+ "filecache": {"class": "fsspec.implementations.cached.WholeFileCacheFileSystem"},
115
+ "ftp": {"class": "fsspec.implementations.ftp.FTPFileSystem"},
116
+ "gcs": {
117
+ "class": "gcsfs.GCSFileSystem",
118
+ "err": "Please install gcsfs to access Google Storage",
119
+ },
120
+ "gdrive": {
121
+ "class": "gdrive_fsspec.GoogleDriveFileSystem",
122
+ "err": "Please install gdrive_fs for access to Google Drive",
123
+ },
124
+ "generic": {"class": "fsspec.generic.GenericFileSystem"},
125
+ "gist": {
126
+ "class": "fsspec.implementations.gist.GistFileSystem",
127
+ "err": "Install the requests package to use the gist FS",
128
+ },
129
+ "git": {
130
+ "class": "fsspec.implementations.git.GitFileSystem",
131
+ "err": "Install pygit2 to browse local git repos",
132
+ },
133
+ "github": {
134
+ "class": "fsspec.implementations.github.GithubFileSystem",
135
+ "err": "Install the requests package to use the github FS",
136
+ },
137
+ "gs": {
138
+ "class": "gcsfs.GCSFileSystem",
139
+ "err": "Please install gcsfs to access Google Storage",
140
+ },
141
+ "hdfs": {
142
+ "class": "fsspec.implementations.arrow.HadoopFileSystem",
143
+ "err": "pyarrow and local java libraries required for HDFS",
144
+ },
145
+ "hf": {
146
+ "class": "huggingface_hub.HfFileSystem",
147
+ "err": "Install huggingface_hub to access HfFileSystem",
148
+ },
149
+ "http": {
150
+ "class": "fsspec.implementations.http.HTTPFileSystem",
151
+ "err": 'HTTPFileSystem requires "requests" and "aiohttp" to be installed',
152
+ },
153
+ "https": {
154
+ "class": "fsspec.implementations.http.HTTPFileSystem",
155
+ "err": 'HTTPFileSystem requires "requests" and "aiohttp" to be installed',
156
+ },
157
+ "jlab": {
158
+ "class": "fsspec.implementations.jupyter.JupyterFileSystem",
159
+ "err": "Jupyter FS requires requests to be installed",
160
+ },
161
+ "jupyter": {
162
+ "class": "fsspec.implementations.jupyter.JupyterFileSystem",
163
+ "err": "Jupyter FS requires requests to be installed",
164
+ },
165
+ "lakefs": {
166
+ "class": "lakefs_spec.LakeFSFileSystem",
167
+ "err": "Please install lakefs-spec to access LakeFSFileSystem",
168
+ },
169
+ "libarchive": {
170
+ "class": "fsspec.implementations.libarchive.LibArchiveFileSystem",
171
+ "err": "LibArchive requires to be installed",
172
+ },
173
+ "local": {"class": "fsspec.implementations.local.LocalFileSystem"},
174
+ "memory": {"class": "fsspec.implementations.memory.MemoryFileSystem"},
175
+ "oci": {
176
+ "class": "ocifs.OCIFileSystem",
177
+ "err": "Install ocifs to access OCI Object Storage",
178
+ },
179
+ "ocilake": {
180
+ "class": "ocifs.OCIFileSystem",
181
+ "err": "Install ocifs to access OCI Data Lake",
182
+ },
183
+ "oss": {
184
+ "class": "ossfs.OSSFileSystem",
185
+ "err": "Install ossfs to access Alibaba Object Storage System",
186
+ },
187
+ "pyscript": {
188
+ "class": "pyscript_fsspec_client.client.PyscriptFileSystem",
189
+ "err": "Install requests (cpython) or run in pyscript",
190
+ },
191
+ "reference": {"class": "fsspec.implementations.reference.ReferenceFileSystem"},
192
+ "root": {
193
+ "class": "fsspec_xrootd.XRootDFileSystem",
194
+ "err": (
195
+ "Install fsspec-xrootd to access xrootd storage system. "
196
+ "Note: 'root' is the protocol name for xrootd storage systems, "
197
+ "not referring to root directories"
198
+ ),
199
+ },
200
+ "s3": {"class": "s3fs.S3FileSystem", "err": "Install s3fs to access S3"},
201
+ "s3a": {"class": "s3fs.S3FileSystem", "err": "Install s3fs to access S3"},
202
+ "sftp": {
203
+ "class": "fsspec.implementations.sftp.SFTPFileSystem",
204
+ "err": 'SFTPFileSystem requires "paramiko" to be installed',
205
+ },
206
+ "simplecache": {"class": "fsspec.implementations.cached.SimpleCacheFileSystem"},
207
+ "smb": {
208
+ "class": "fsspec.implementations.smb.SMBFileSystem",
209
+ "err": 'SMB requires "smbprotocol" or "smbprotocol[kerberos]" installed',
210
+ },
211
+ "ssh": {
212
+ "class": "fsspec.implementations.sftp.SFTPFileSystem",
213
+ "err": 'SFTPFileSystem requires "paramiko" to be installed',
214
+ },
215
+ "tar": {"class": "fsspec.implementations.tar.TarFileSystem"},
216
+ "tos": {
217
+ "class": "tosfs.TosFileSystem",
218
+ "err": "Install tosfs to access ByteDance volcano engine Tinder Object Storage",
219
+ },
220
+ "tosfs": {
221
+ "class": "tosfs.TosFileSystem",
222
+ "err": "Install tosfs to access ByteDance volcano engine Tinder Object Storage",
223
+ },
224
+ "wandb": {"class": "wandbfs.WandbFS", "err": "Install wandbfs to access wandb"},
225
+ "webdav": {
226
+ "class": "webdav4.fsspec.WebdavFileSystem",
227
+ "err": "Install webdav4 to access WebDAV",
228
+ },
229
+ "webhdfs": {
230
+ "class": "fsspec.implementations.webhdfs.WebHDFS",
231
+ "err": 'webHDFS access requires "requests" to be installed',
232
+ },
233
+ "zip": {"class": "fsspec.implementations.zip.ZipFileSystem"},
234
+ }
235
+
236
+ assert list(known_implementations) == sorted(known_implementations), (
237
+ "Not in alphabetical order"
238
+ )
239
+
240
+
241
+ def get_filesystem_class(protocol):
242
+ """Fetch named protocol implementation from the registry
243
+
244
+ The dict ``known_implementations`` maps protocol names to the locations
245
+ of classes implementing the corresponding file-system. When used for the
246
+ first time, appropriate imports will happen and the class will be placed in
247
+ the registry. All subsequent calls will fetch directly from the registry.
248
+
249
+ Some protocol implementations require additional dependencies, and so the
250
+ import may fail. In this case, the string in the "err" field of the
251
+ ``known_implementations`` will be given as the error message.
252
+ """
253
+ if not protocol:
254
+ protocol = default
255
+
256
+ if protocol not in registry:
257
+ if protocol not in known_implementations:
258
+ raise ValueError(f"Protocol not known: {protocol}")
259
+ bit = known_implementations[protocol]
260
+ try:
261
+ register_implementation(protocol, _import_class(bit["class"]))
262
+ except ImportError as e:
263
+ raise ImportError(bit.get("err")) from e
264
+ cls = registry[protocol]
265
+ if getattr(cls, "protocol", None) in ("abstract", None):
266
+ cls.protocol = protocol
267
+
268
+ return cls
269
+
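# Editor's sketch of the resolution flow above (assumes only the fsspec
# package itself): "memory" has no extra dependency, so the first call
# imports and registers it; an unknown protocol fails before any import.
from fsspec.registry import get_filesystem_class

cls = get_filesystem_class("memory")  # first use: import, then cache in registry
print(cls.protocol)                   # "memory"
try:
    get_filesystem_class("no-such-proto")
except ValueError as err:
    print(err)                        # Protocol not known: no-such-proto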
270
+
271
+ s3_msg = """Your installed version of s3fs is very old and known to cause
272
+ severe performance issues, see also https://github.com/dask/dask/issues/10276
273
+
274
+ To fix, you should specify a lower version bound on s3fs, or
275
+ update the current installation.
276
+ """
277
+
278
+
279
+ def _import_class(fqp: str):
280
+ """Take a fully-qualified path and return the imported class or identifier.
281
+
282
+ ``fqp`` is of the form "package.module.klass" or
283
+ "package.module:subobject.klass".
284
+
285
+ Warnings
286
+ --------
287
+ This can import arbitrary modules. Make sure you haven't installed any modules
288
+ that may execute malicious code at import time.
289
+ """
290
+ if ":" in fqp:
291
+ mod, name = fqp.rsplit(":", 1)
292
+ else:
293
+ mod, name = fqp.rsplit(".", 1)
294
+
295
+ is_s3 = mod == "s3fs"
296
+ mod = importlib.import_module(mod)
297
+ # numeric compare avoids lexicographic bugs (e.g. "10" < "5" as strings)
+ if is_s3 and tuple(int(p) for p in mod.__version__.split(".")[:2]) < (0, 5):
298
+ warnings.warn(s3_msg)
299
+ for part in name.split("."):
300
+ mod = getattr(mod, part)
301
+
302
+ if not isinstance(mod, type):
303
+ raise TypeError(f"{fqp} is not a class")
304
+
305
+ return mod
306
+
307
+
308
+ def filesystem(protocol, **storage_options):
309
+ """Instantiate filesystems for given protocol and arguments
310
+
311
+ ``storage_options`` are specific to the protocol being chosen, and are
312
+ passed directly to the class.
313
+ """
314
+ if protocol == "arrow_hdfs":
315
+ warnings.warn(
316
+ "The 'arrow_hdfs' protocol has been deprecated and will be "
317
+ "removed in the future. Specify it as 'hdfs'.",
318
+ DeprecationWarning,
319
+ )
320
+
321
+ cls = get_filesystem_class(protocol)
322
+ return cls(**storage_options)
323
+
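# Minimal usage sketch of filesystem(): storage_options go straight to the
# backend class constructor (none are needed for the in-memory backend).
import fsspec

fs = fsspec.filesystem("memory")
fs.pipe("/demo/hello.txt", b"hi")      # write bytes
print(fs.cat("/demo/hello.txt"))       # b'hi'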
324
+
325
+ def available_protocols():
326
+ """Return a list of the implemented protocols.
327
+
328
+ Note that any given protocol may require extra packages to be importable.
329
+ """
330
+ return list(known_implementations)
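# Hedged sketch of adding a third-party protocol at runtime; "MyFS" and
# "myfs" are illustrative names, not part of fsspec. Once registered, the
# protocol resolves exactly like the built-in entries above.
from fsspec import AbstractFileSystem
from fsspec.registry import get_filesystem_class, register_implementation

class MyFS(AbstractFileSystem):
    protocol = "myfs"

register_implementation("myfs", MyFS)
assert get_filesystem_class("myfs") is MyFS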
.venv/lib/python3.13/site-packages/fsspec/spec.py ADDED
@@ -0,0 +1,2270 @@
1
+ from __future__ import annotations
2
+
3
+ import io
4
+ import json
5
+ import logging
6
+ import os
7
+ import threading
8
+ import warnings
9
+ import weakref
10
+ from errno import ESPIPE
11
+ from glob import has_magic
12
+ from hashlib import sha256
13
+ from typing import Any, ClassVar
14
+
15
+ from .callbacks import DEFAULT_CALLBACK
16
+ from .config import apply_config, conf
17
+ from .dircache import DirCache
18
+ from .transaction import Transaction
19
+ from .utils import (
20
+ _unstrip_protocol,
21
+ glob_translate,
22
+ isfilelike,
23
+ other_paths,
24
+ read_block,
25
+ stringify_path,
26
+ tokenize,
27
+ )
28
+
29
+ logger = logging.getLogger("fsspec")
30
+
31
+
32
+ def make_instance(cls, args, kwargs):
33
+ return cls(*args, **kwargs)
34
+
35
+
36
+ class _Cached(type):
37
+ """
38
+ Metaclass for caching file system instances.
39
+
40
+ Notes
41
+ -----
42
+ Instances are cached according to
43
+
44
+ * The values of the class attributes listed in `_extra_tokenize_attributes`
45
+ * The arguments passed to ``__init__``.
46
+
47
+ This creates an additional reference to the filesystem, which prevents the
48
+ filesystem from being garbage collected when all *user* references go away.
49
+ A call to the :meth:`AbstractFileSystem.clear_instance_cache` must *also*
50
+ be made for a filesystem instance to be garbage collected.
51
+ """
52
+
53
+ def __init__(cls, *args, **kwargs):
54
+ super().__init__(*args, **kwargs)
55
+ # Note: we intentionally create a reference here, to avoid garbage
56
+ # collecting instances when all other references are gone. To really
57
+ # delete a FileSystem, the cache must be cleared.
58
+ if conf.get("weakref_instance_cache"): # pragma: no cover
59
+ # debug option for analysing fork/spawn conditions
60
+ cls._cache = weakref.WeakValueDictionary()
61
+ else:
62
+ cls._cache = {}
63
+ cls._pid = os.getpid()
64
+
65
+ def __call__(cls, *args, **kwargs):
66
+ kwargs = apply_config(cls, kwargs)
67
+ extra_tokens = tuple(
68
+ getattr(cls, attr, None) for attr in cls._extra_tokenize_attributes
69
+ )
70
+ token = tokenize(
71
+ cls, cls._pid, threading.get_ident(), *args, *extra_tokens, **kwargs
72
+ )
73
+ skip = kwargs.pop("skip_instance_cache", False)
74
+ if os.getpid() != cls._pid:
75
+ cls._cache.clear()
76
+ cls._pid = os.getpid()
77
+ if not skip and cls.cachable and token in cls._cache:
78
+ cls._latest = token
79
+ return cls._cache[token]
80
+ else:
81
+ obj = super().__call__(*args, **kwargs)
82
+ # Setting _fs_token here causes some static linters to complain.
83
+ obj._fs_token_ = token
84
+ obj.storage_args = args
85
+ obj.storage_options = kwargs
86
+ if obj.async_impl and obj.mirror_sync_methods:
87
+ from .asyn import mirror_sync_methods
88
+
89
+ mirror_sync_methods(obj)
90
+
91
+ if cls.cachable and not skip:
92
+ cls._latest = token
93
+ cls._cache[token] = obj
94
+ return obj
95
+
96
+
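# Observable effect of the _Cached metaclass, using the built-in memory
# backend: equal constructor arguments yield the very same object, while
# skip_instance_cache=True bypasses the cache.
import fsspec

a = fsspec.filesystem("memory")
b = fsspec.filesystem("memory")
c = fsspec.filesystem("memory", skip_instance_cache=True)
print(a is b)  # True  -- served from cls._cache
print(a is c)  # False -- fresh, uncached instance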
97
+ class AbstractFileSystem(metaclass=_Cached):
98
+ """
99
+ An abstract super-class for pythonic file-systems
100
+
101
+ Implementations are expected to be compatible with or, better, subclass
102
+ from here.
103
+ """
104
+
105
+ cachable = True # this class can be cached, instances reused
106
+ _cached = False
107
+ blocksize = 2**22
108
+ sep = "/"
109
+ protocol: ClassVar[str | tuple[str, ...]] = "abstract"
110
+ _latest = None
111
+ async_impl = False
112
+ mirror_sync_methods = False
113
+ root_marker = "" # For some FSs, may require leading '/' or other character
114
+ transaction_type = Transaction
115
+
116
+ #: Extra *class attributes* that should be considered when hashing.
117
+ _extra_tokenize_attributes = ()
118
+
119
+ # Set by _Cached metaclass
120
+ storage_args: tuple[Any, ...]
121
+ storage_options: dict[str, Any]
122
+
123
+ def __init__(self, *args, **storage_options):
124
+ """Create and configure file-system instance
125
+
126
+ Instances may be cachable, so if similar enough arguments are seen
127
+ a new instance is not required. The token attribute exists to allow
128
+ implementations to cache instances if they wish.
129
+
130
+ A reasonable default should be provided if there are no arguments.
131
+
132
+ Subclasses should call this method.
133
+
134
+ Parameters
135
+ ----------
136
+ use_listings_cache, listings_expiry_time, max_paths:
137
+ passed to ``DirCache``, if the implementation supports
138
+ directory listing caching. Pass use_listings_cache=False
139
+ to disable such caching.
140
+ skip_instance_cache: bool
141
+ If this is a cachable implementation, pass True here to force
142
+ creating a new instance even if a matching instance exists, and prevent
143
+ storing this instance.
144
+ asynchronous: bool
145
+ loop: asyncio-compatible IOLoop or None
146
+ """
147
+ if self._cached:
148
+ # reusing instance, don't change
149
+ return
150
+ self._cached = True
151
+ self._intrans = False
152
+ self._transaction = None
153
+ self._invalidated_caches_in_transaction = []
154
+ self.dircache = DirCache(**storage_options)
155
+
156
+ if storage_options.pop("add_docs", None):
157
+ warnings.warn("add_docs is no longer supported.", FutureWarning)
158
+
159
+ if storage_options.pop("add_aliases", None):
160
+ warnings.warn("add_aliases has been removed.", FutureWarning)
161
+ # This is set in _Cached
162
+ self._fs_token_ = None
163
+
164
+ @property
165
+ def fsid(self):
166
+ """Persistent filesystem id that can be used to compare filesystems
167
+ across sessions.
168
+ """
169
+ raise NotImplementedError
170
+
171
+ @property
172
+ def _fs_token(self):
173
+ return self._fs_token_
174
+
175
+ def __dask_tokenize__(self):
176
+ return self._fs_token
177
+
178
+ def __hash__(self):
179
+ return int(self._fs_token, 16)
180
+
181
+ def __eq__(self, other):
182
+ return isinstance(other, type(self)) and self._fs_token == other._fs_token
183
+
184
+ def __reduce__(self):
185
+ return make_instance, (type(self), self.storage_args, self.storage_options)
186
+
187
+ @classmethod
188
+ def _strip_protocol(cls, path):
189
+ """Turn path from fully-qualified to file-system-specific
190
+
191
+ May require FS-specific handling, e.g., for relative paths or links.
192
+ """
193
+ if isinstance(path, list):
194
+ return [cls._strip_protocol(p) for p in path]
195
+ path = stringify_path(path)
196
+ protos = (cls.protocol,) if isinstance(cls.protocol, str) else cls.protocol
197
+ for protocol in protos:
198
+ if path.startswith(protocol + "://"):
199
+ path = path[len(protocol) + 3 :]
200
+ elif path.startswith(protocol + "::"):
201
+ path = path[len(protocol) + 2 :]
202
+ path = path.rstrip("/")
203
+ # use of root_marker to make minimum required path, e.g., "/"
204
+ return path or cls.root_marker
205
+
206
+ def unstrip_protocol(self, name: str) -> str:
207
+ """Format FS-specific path to generic, including protocol"""
208
+ protos = (self.protocol,) if isinstance(self.protocol, str) else self.protocol
209
+ for protocol in protos:
210
+ if name.startswith(f"{protocol}://"):
211
+ return name
212
+ return f"{protos[0]}://{name}"
213
+
214
+ @staticmethod
215
+ def _get_kwargs_from_urls(path):
216
+ """If kwargs can be encoded in the paths, extract them here
217
+
218
+ This should happen before instantiation of the class; incoming paths
219
+ then should be amended to strip the options in methods.
220
+
221
+ Examples may look like an sftp path "sftp://user@host:/my/path", where
222
+ the user and host should become kwargs and later get stripped.
223
+ """
224
+ # by default, nothing happens
225
+ return {}
226
+
227
+ @classmethod
228
+ def current(cls):
229
+ """Return the most recently instantiated FileSystem
230
+
231
+ If no instance has been created, then create one with defaults
232
+ """
233
+ if cls._latest in cls._cache:
234
+ return cls._cache[cls._latest]
235
+ return cls()
236
+
237
+ @property
238
+ def transaction(self):
239
+ """A context within which files are committed together upon exit
240
+
241
+ Requires the file class to implement `.commit()` and `.discard()`
242
+ for the normal and exception cases.
243
+ """
244
+ if self._transaction is None:
245
+ self._transaction = self.transaction_type(self)
246
+ return self._transaction
247
+
248
+ def start_transaction(self):
249
+ """Begin write transaction for deferring files, non-context version"""
250
+ self._intrans = True
251
+ self._transaction = self.transaction_type(self)
252
+ return self.transaction
253
+
254
+ def end_transaction(self):
255
+ """Finish write transaction, non-context version"""
256
+ self.transaction.complete()
257
+ self._transaction = None
258
+ # The invalid cache must be cleared after the transaction is completed.
259
+ for path in self._invalidated_caches_in_transaction:
260
+ self.invalidate_cache(path)
261
+ self._invalidated_caches_in_transaction.clear()
262
+
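# Sketch of the transaction contract, assuming a backend whose file objects
# implement commit()/discard() (the in-memory one does): writes inside the
# block are deferred and committed together on a clean exit.
import fsspec

fs = fsspec.filesystem("memory")
with fs.transaction:
    with fs.open("/staged.txt", "wb") as f:
        f.write(b"all-or-nothing")
# leaving the block calls Transaction.complete(), committing the file
print(fs.cat("/staged.txt"))  # b'all-or-nothing'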
263
+ def invalidate_cache(self, path=None):
264
+ """
265
+ Discard any cached directory information
266
+
267
+ Parameters
268
+ ----------
269
+ path: string or None
270
+ If None, clear all listings cached else listings at or under given
271
+ path.
272
+ """
273
+ # Not necessary to implement invalidation mechanism, may have no cache.
274
+ # But if it does, call this parent-class method from your
275
+ # subclass to ensure caches expire correctly after transactions.
276
+ # See the implementation of FTPFileSystem in ftp.py
277
+ if self._intrans:
278
+ self._invalidated_caches_in_transaction.append(path)
279
+
280
+ def mkdir(self, path, create_parents=True, **kwargs):
281
+ """
282
+ Create directory entry at path
283
+
284
+ For systems that don't have true directories, may create one for
285
+ this instance only and not touch the real filesystem
286
+
287
+ Parameters
288
+ ----------
289
+ path: str
290
+ location
291
+ create_parents: bool
292
+ if True, this is equivalent to ``makedirs``
293
+ kwargs:
294
+ may be permissions, etc.
295
+ """
296
+ pass # not necessary to implement, may not have directories
297
+
298
+ def makedirs(self, path, exist_ok=False):
299
+ """Recursively make directories
300
+
301
+ Creates directory at path and any intervening required directories.
302
+ Raises exception if, for instance, the path already exists but is a
303
+ file.
304
+
305
+ Parameters
306
+ ----------
307
+ path: str
308
+ leaf directory name
309
+ exist_ok: bool (False)
310
+ If False, will error if the target already exists
311
+ """
312
+ pass # not necessary to implement, may not have directories
313
+
314
+ def rmdir(self, path):
315
+ """Remove a directory, if empty"""
316
+ pass # not necessary to implement, may not have directories
317
+
318
+ def ls(self, path, detail=True, **kwargs):
319
+ """List objects at path.
320
+
321
+ This should include subdirectories and files at that location. The
322
+ difference between a file and a directory must be clear when details
323
+ are requested.
324
+
325
+ The specific keys, or perhaps a FileInfo class, or similar, is TBD,
326
+ but must be consistent across implementations.
327
+ Must include:
328
+
329
+ - full path to the entry (without protocol)
330
+ - size of the entry, in bytes. If the value cannot be determined, will
331
+ be ``None``.
332
+ - type of entry, "file", "directory" or other
333
+
334
+ Additional information
335
+ may be present, appropriate to the file-system, e.g., generation,
336
+ checksum, etc.
337
+
338
+ May use refresh=True|False to allow use of self._ls_from_cache to
339
+ check for a saved listing and avoid calling the backend. This would be
340
+ common where listing may be expensive.
341
+
342
+ Parameters
343
+ ----------
344
+ path: str
345
+ detail: bool
346
+ if True, gives a list of dictionaries, where each is the same as
347
+ the result of ``info(path)``. If False, gives a list of paths
348
+ (str).
349
+ kwargs: may have additional backend-specific options, such as version
350
+ information
351
+
352
+ Returns
353
+ -------
354
+ List of strings if detail is False, or list of directory information
355
+ dicts if detail is True.
356
+ """
357
+ raise NotImplementedError
358
+
359
+ def _ls_from_cache(self, path):
360
+ """Check cache for listing
361
+
362
+ Returns listing, if found (may be empty list for a directory that exists
363
+ but contains nothing), None if not in cache.
364
+ """
365
+ parent = self._parent(path)
366
+ try:
367
+ return self.dircache[path.rstrip("/")]
368
+ except KeyError:
369
+ pass
370
+ try:
371
+ files = [
372
+ f
373
+ for f in self.dircache[parent]
374
+ if f["name"] == path
375
+ or (f["name"] == path.rstrip("/") and f["type"] == "directory")
376
+ ]
377
+ if len(files) == 0:
378
+ # parent dir was listed but did not contain this file
379
+ raise FileNotFoundError(path)
380
+ return files
381
+ except KeyError:
382
+ pass
383
+
384
+ def walk(self, path, maxdepth=None, topdown=True, on_error="omit", **kwargs):
385
+ """Return all files under the given path.
386
+
387
+ List all files, recursing into subdirectories; output is iterator-style,
388
+ like ``os.walk()``. For a simple list of files, ``find()`` is available.
389
+
390
+ When topdown is True, the caller can modify the dirnames list in-place (perhaps
391
+ using del or slice assignment), and walk() will
392
+ only recurse into the subdirectories whose names remain in dirnames;
393
+ this can be used to prune the search, impose a specific order of visiting,
394
+ or even to inform walk() about directories the caller creates or renames before
395
+ it resumes walk() again.
396
+ Modifying dirnames when topdown is False has no effect. (see os.walk)
397
+
398
+ Note that the "files" outputted will include anything that is not
399
+ a directory, such as links.
400
+
401
+ Parameters
402
+ ----------
403
+ path: str
404
+ Root to recurse into
405
+ maxdepth: int
406
+ Maximum recursion depth. None means limitless, but not recommended
407
+ on link-based file-systems.
408
+ topdown: bool (True)
409
+ Whether to walk the directory tree from the top downwards or from
410
+ the bottom upwards.
411
+ on_error: "omit", "raise", a callable
412
+ if omit (default), path with exception will simply be empty;
413
+ If raise, an underlying exception will be raised;
414
+ if callable, it will be called with a single OSError instance as argument
415
+ kwargs: passed to ``ls``
416
+ """
417
+ if maxdepth is not None and maxdepth < 1:
418
+ raise ValueError("maxdepth must be at least 1")
419
+
420
+ path = self._strip_protocol(path)
421
+ full_dirs = {}
422
+ dirs = {}
423
+ files = {}
424
+
425
+ detail = kwargs.pop("detail", False)
426
+ try:
427
+ listing = self.ls(path, detail=True, **kwargs)
428
+ except (FileNotFoundError, OSError) as e:
429
+ if on_error == "raise":
430
+ raise
431
+ if callable(on_error):
432
+ on_error(e)
433
+ return
434
+
435
+ for info in listing:
436
+ # each info name must be at least [path]/part , but here
437
+ # we check also for names like [path]/part/
438
+ pathname = info["name"].rstrip("/")
439
+ name = pathname.rsplit("/", 1)[-1]
440
+ if info["type"] == "directory" and pathname != path:
441
+ # do not include "self" path
442
+ full_dirs[name] = pathname
443
+ dirs[name] = info
444
+ elif pathname == path:
445
+ # file-like with same name as the given path
446
+ files[""] = info
447
+ else:
448
+ files[name] = info
449
+
450
+ if not detail:
451
+ dirs = list(dirs)
452
+ files = list(files)
453
+
454
+ if topdown:
455
+ # Yield before recursion if walking top down
456
+ yield path, dirs, files
457
+
458
+ if maxdepth is not None:
459
+ maxdepth -= 1
460
+ if maxdepth < 1:
461
+ if not topdown:
462
+ yield path, dirs, files
463
+ return
464
+
465
+ for d in dirs:
466
+ yield from self.walk(
467
+ full_dirs[d],
468
+ maxdepth=maxdepth,
469
+ detail=detail,
470
+ topdown=topdown,
471
+ **kwargs,
472
+ )
473
+
474
+ if not topdown:
475
+ # Yield after recursion if walking bottom up
476
+ yield path, dirs, files
477
+
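# Usage sketch mirroring os.walk on a small in-memory tree; with
# topdown=True, pruning the dirs list in place stops descent into "b".
import fsspec

fs = fsspec.filesystem("memory")
fs.pipe({"/root/a/x.txt": b"1", "/root/b/y.txt": b"2"})
for path, dirs, files in fs.walk("/root", topdown=True):
    dirs[:] = [d for d in dirs if d != "b"]  # never recurse into /root/b
    print(path, dirs, files)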
478
+ def find(self, path, maxdepth=None, withdirs=False, detail=False, **kwargs):
479
+ """List all files below path.
480
+
481
+ Like posix ``find`` command without conditions
482
+
483
+ Parameters
484
+ ----------
485
+ path : str
486
+ maxdepth: int or None
487
+ If not None, the maximum number of levels to descend
488
+ withdirs: bool
489
+ Whether to include directory paths in the output. This is True
490
+ when used by glob, but users usually only want files.
491
+ kwargs are passed to ``ls``.
492
+ """
493
+ # TODO: allow equivalent of -name parameter
494
+ path = self._strip_protocol(path)
495
+ out = {}
496
+
497
+ # Add the root directory if withdirs is requested
498
+ # This is needed for posix glob compliance
499
+ if withdirs and path != "" and self.isdir(path):
500
+ out[path] = self.info(path)
501
+
502
+ for _, dirs, files in self.walk(path, maxdepth, detail=True, **kwargs):
503
+ if withdirs:
504
+ files.update(dirs)
505
+ out.update({info["name"]: info for name, info in files.items()})
506
+ if not out and self.isfile(path):
507
+ # walk works on directories, but find should also return [path]
508
+ # when path happens to be a file
509
+ out[path] = {}
510
+ names = sorted(out)
511
+ if not detail:
512
+ return names
513
+ else:
514
+ return {name: out[name] for name in names}
515
+
516
+ def du(self, path, total=True, maxdepth=None, withdirs=False, **kwargs):
517
+ """Space used by files and optionally directories within a path
518
+
519
+ Directory size does not include the size of its contents.
520
+
521
+ Parameters
522
+ ----------
523
+ path: str
524
+ total: bool
525
+ Whether to sum all the file sizes
526
+ maxdepth: int or None
527
+ Maximum number of directory levels to descend, None for unlimited.
528
+ withdirs: bool
529
+ Whether to include directory paths in the output.
530
+ kwargs: passed to ``find``
531
+
532
+ Returns
533
+ -------
534
+ Dict of {path: size} if total=False, or int otherwise, where numbers
535
+ refer to bytes used.
536
+ """
537
+ sizes = {}
538
+ if withdirs and self.isdir(path):
539
+ # Include top-level directory in output
540
+ info = self.info(path)
541
+ sizes[info["name"]] = info["size"]
542
+ for f in self.find(path, maxdepth=maxdepth, withdirs=withdirs, **kwargs):
543
+ info = self.info(f)
544
+ sizes[info["name"]] = info["size"]
545
+ if total:
546
+ return sum(sizes.values())
547
+ else:
548
+ return sizes
549
+
550
+ def glob(self, path, maxdepth=None, **kwargs):
551
+ """Find files by glob-matching.
552
+
553
+ Pattern matching capabilities for finding files that match the given pattern.
554
+
555
+ Parameters
556
+ ----------
557
+ path: str
558
+ The glob pattern to match against
559
+ maxdepth: int or None
560
+ Maximum depth for ``'**'`` patterns. Applied on the first ``'**'`` found.
561
+ Must be at least 1 if provided.
562
+ kwargs:
563
+ Additional arguments passed to ``find`` (e.g., detail=True)
564
+
565
+ Returns
566
+ -------
567
+ List of matched paths, or dict of paths and their info if detail=True
568
+
569
+ Notes
570
+ -----
571
+ Supported patterns:
572
+ - '*': Matches any sequence of characters within a single directory level
573
+ - ``'**'``: Matches any number of directory levels (must be an entire path component)
574
+ - '?': Matches exactly one character
575
+ - '[abc]': Matches any character in the set
576
+ - '[a-z]': Matches any character in the range
577
+ - '[!abc]': Matches any character NOT in the set
578
+
579
+ Special behaviors:
580
+ - If the path ends with '/', only folders are returned
581
+ - Consecutive '*' characters are compressed into a single '*'
582
+ - Empty brackets '[]' never match anything
583
+ - Negated empty brackets '[!]' match any single character
584
+ - Special characters in character classes are escaped properly
585
+
586
+ Limitations:
587
+ - ``'**'`` must be a complete path component (e.g., ``'a/**/b'``, not ``'a**b'``)
588
+ - No brace expansion ('{a,b}.txt')
589
+ - No extended glob patterns ('+(pattern)', '!(pattern)')
590
+ """
591
+ if maxdepth is not None and maxdepth < 1:
592
+ raise ValueError("maxdepth must be at least 1")
593
+
594
+ import re
595
+
596
+ seps = (os.path.sep, os.path.altsep) if os.path.altsep else (os.path.sep,)
597
+ ends_with_sep = path.endswith(seps) # _strip_protocol strips trailing slash
598
+ path = self._strip_protocol(path)
599
+ append_slash_to_dirname = ends_with_sep or path.endswith(
600
+ tuple(sep + "**" for sep in seps)
601
+ )
602
+ idx_star = path.find("*") if path.find("*") >= 0 else len(path)
603
+ idx_qmark = path.find("?") if path.find("?") >= 0 else len(path)
604
+ idx_brace = path.find("[") if path.find("[") >= 0 else len(path)
605
+
606
+ min_idx = min(idx_star, idx_qmark, idx_brace)
607
+
608
+ detail = kwargs.pop("detail", False)
609
+
610
+ if not has_magic(path):
611
+ if self.exists(path, **kwargs):
612
+ if not detail:
613
+ return [path]
614
+ else:
615
+ return {path: self.info(path, **kwargs)}
616
+ else:
617
+ if not detail:
618
+ return [] # glob of non-existent returns empty
619
+ else:
620
+ return {}
621
+ elif "/" in path[:min_idx]:
622
+ min_idx = path[:min_idx].rindex("/")
623
+ root = path[: min_idx + 1]
624
+ depth = path[min_idx + 1 :].count("/") + 1
625
+ else:
626
+ root = ""
627
+ depth = path[min_idx + 1 :].count("/") + 1
628
+
629
+ if "**" in path:
630
+ if maxdepth is not None:
631
+ idx_double_stars = path.find("**")
632
+ depth_double_stars = path[idx_double_stars:].count("/") + 1
633
+ depth = depth - depth_double_stars + maxdepth
634
+ else:
635
+ depth = None
636
+
637
+ allpaths = self.find(root, maxdepth=depth, withdirs=True, detail=True, **kwargs)
638
+
639
+ pattern = glob_translate(path + ("/" if ends_with_sep else ""))
640
+ pattern = re.compile(pattern)
641
+
642
+ out = {
643
+ p: info
644
+ for p, info in sorted(allpaths.items())
645
+ if pattern.match(
646
+ p + "/"
647
+ if append_slash_to_dirname and info["type"] == "directory"
648
+ else p
649
+ )
650
+ }
651
+
652
+ if detail:
653
+ return out
654
+ else:
655
+ return list(out)
656
+
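# Pattern sketch on the in-memory backend: '*' stays within one directory
# level, while '**' crosses levels (and, per the notes above, may match
# zero levels, so it can also pick up /g/a.csv).
import fsspec

fs = fsspec.filesystem("memory")
fs.pipe({"/g/a.csv": b"", "/g/sub/b.csv": b""})
print(fs.glob("/g/*.csv"))      # ['/g/a.csv']
print(fs.glob("/g/**/*.csv"))   # includes /g/sub/b.csv as well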
657
+ def exists(self, path, **kwargs):
658
+ """Is there a file at the given path"""
659
+ try:
660
+ self.info(path, **kwargs)
661
+ return True
662
+ except: # noqa: E722
663
+ # any exception allowed bar FileNotFoundError?
664
+ return False
665
+
666
+ def lexists(self, path, **kwargs):
667
+ """If there is a file at the given path (including
668
+ broken links)"""
669
+ return self.exists(path, **kwargs)
670
+
671
+ def info(self, path, **kwargs):
672
+ """Give details of entry at path
673
+
674
+ Returns a single dictionary, with exactly the same information as ``ls``
675
+ would with ``detail=True``.
676
+
677
+ The default implementation calls ls and could be overridden by a
678
+ shortcut. kwargs are passed on to ``ls()``.
679
+
680
+ Some file systems might not be able to measure the file's size, in
681
+ which case, the returned dict will include ``'size': None``.
682
+
683
+ Returns
684
+ -------
685
+ dict with keys: name (full path in the FS), size (in bytes), type (file,
686
+ directory, or something else) and other FS-specific keys.
687
+ """
688
+ path = self._strip_protocol(path)
689
+ out = self.ls(self._parent(path), detail=True, **kwargs)
690
+ out = [o for o in out if o["name"].rstrip("/") == path]
691
+ if out:
692
+ return out[0]
693
+ out = self.ls(path, detail=True, **kwargs)
694
+ path = path.rstrip("/")
695
+ out1 = [o for o in out if o["name"].rstrip("/") == path]
696
+ if len(out1) == 1:
697
+ if "size" not in out1[0]:
698
+ out1[0]["size"] = None
699
+ return out1[0]
700
+ elif len(out1) > 1 or out:
701
+ return {"name": path, "size": 0, "type": "directory"}
702
+ else:
703
+ raise FileNotFoundError(path)
704
+
705
+ def checksum(self, path):
706
+ """Unique value for current version of file
707
+
708
+ If the checksum is the same from one moment to another, the contents
709
+ are guaranteed to be the same. If the checksum changes, the contents
710
+ *might* have changed.
711
+
712
+ This should normally be overridden; default will probably capture
713
+ creation/modification timestamp (which would be good) or maybe
714
+ access timestamp (which would be bad)
715
+ """
716
+ return int(tokenize(self.info(path)), 16)
717
+
718
+ def size(self, path):
719
+ """Size in bytes of file"""
720
+ return self.info(path).get("size", None)
721
+
722
+ def sizes(self, paths):
723
+ """Size in bytes of each file in a list of paths"""
724
+ return [self.size(p) for p in paths]
725
+
726
+ def isdir(self, path):
727
+ """Is this entry directory-like?"""
728
+ try:
729
+ return self.info(path)["type"] == "directory"
730
+ except OSError:
731
+ return False
732
+
733
+ def isfile(self, path):
734
+ """Is this entry file-like?"""
735
+ try:
736
+ return self.info(path)["type"] == "file"
737
+ except: # noqa: E722
738
+ return False
739
+
740
+ def read_text(self, path, encoding=None, errors=None, newline=None, **kwargs):
741
+ """Get the contents of the file as a string.
742
+
743
+ Parameters
744
+ ----------
745
+ path: str
746
+ URL of file on this filesystem
747
+ encoding, errors, newline: same as `open`.
748
+ """
749
+ with self.open(
750
+ path,
751
+ mode="r",
752
+ encoding=encoding,
753
+ errors=errors,
754
+ newline=newline,
755
+ **kwargs,
756
+ ) as f:
757
+ return f.read()
758
+
759
+ def write_text(
760
+ self, path, value, encoding=None, errors=None, newline=None, **kwargs
761
+ ):
762
+ """Write the text to the given file.
763
+
764
+ An existing file will be overwritten.
765
+
766
+ Parameters
767
+ ----------
768
+ path: str
769
+ URL of file on this filesystem
770
+ value: str
771
+ Text to write.
772
+ encoding, errors, newline: same as `open`.
773
+ """
774
+ with self.open(
775
+ path,
776
+ mode="w",
777
+ encoding=encoding,
778
+ errors=errors,
779
+ newline=newline,
780
+ **kwargs,
781
+ ) as f:
782
+ return f.write(value)
783
+
784
+ def cat_file(self, path, start=None, end=None, **kwargs):
785
+ """Get the content of a file
786
+
787
+ Parameters
788
+ ----------
789
+ path: URL of file on this filesystem
790
+ start, end: int
791
+ Bytes limits of the read. If negative, backwards from end,
792
+ like usual python slices. Either can be None for start or
793
+ end of file, respectively
794
+ kwargs: passed to ``open()``.
795
+ """
796
+ # explicitly set buffering off?
797
+ with self.open(path, "rb", **kwargs) as f:
798
+ if start is not None:
799
+ if start >= 0:
800
+ f.seek(start)
801
+ else:
802
+ f.seek(max(0, f.size + start))
803
+ if end is not None:
804
+ if end < 0:
805
+ end = f.size + end
806
+ return f.read(end - f.tell())
807
+ return f.read()
808
+
809
+ def pipe_file(self, path, value, mode="overwrite", **kwargs):
810
+ """Set the bytes of given file"""
811
+ if mode == "create" and self.exists(path):
812
+ # non-atomic but simple way; or could use "xb" in open(), which is likely
813
+ # not as well supported
814
+ raise FileExistsError
815
+ with self.open(path, "wb", **kwargs) as f:
816
+ f.write(value)
817
+
818
+ def pipe(self, path, value=None, **kwargs):
819
+ """Put value into path
820
+
821
+ (counterpart to ``cat``)
822
+
823
+ Parameters
824
+ ----------
825
+ path: string or dict(str, bytes)
826
+ If a string, a single remote location to put ``value`` bytes; if a dict,
827
+ a mapping of {path: bytesvalue}.
828
+ value: bytes, optional
829
+ If using a single path, these are the bytes to put there. Ignored if
830
+ ``path`` is a dict
831
+ """
832
+ if isinstance(path, str):
833
+ self.pipe_file(self._strip_protocol(path), value, **kwargs)
834
+ elif isinstance(path, dict):
835
+ for k, v in path.items():
836
+ self.pipe_file(self._strip_protocol(k), v, **kwargs)
837
+ else:
838
+ raise ValueError("path must be str or dict")
839
+
840
+ def cat_ranges(
841
+ self, paths, starts, ends, max_gap=None, on_error="return", **kwargs
842
+ ):
843
+ """Get the contents of byte ranges from one or more files
844
+
845
+ Parameters
846
+ ----------
847
+ paths: list
848
+ A list of of filepaths on this filesystems
849
+ starts, ends: int or list
850
+ Bytes limits of the read. If using a single int, the same value will be
851
+ used to read all the specified files.
852
+ """
853
+ if max_gap is not None:
854
+ raise NotImplementedError
855
+ if not isinstance(paths, list):
856
+ raise TypeError
857
+ if not isinstance(starts, list):
858
+ starts = [starts] * len(paths)
859
+ if not isinstance(ends, list):
860
+ ends = [ends] * len(paths)
861
+ if len(starts) != len(paths) or len(ends) != len(paths):
862
+ raise ValueError
863
+ out = []
864
+ for p, s, e in zip(paths, starts, ends):
865
+ try:
866
+ out.append(self.cat_file(p, s, e))
867
+ except Exception as e:
868
+ if on_error == "return":
869
+ out.append(e)
870
+ else:
871
+ raise
872
+ return out
873
+
874
+ def cat(self, path, recursive=False, on_error="raise", **kwargs):
875
+ """Fetch (potentially multiple) paths' contents
876
+
877
+ Parameters
878
+ ----------
879
+ recursive: bool
880
+ If True, assume the path(s) are directories, and get all the
881
+ contained files
882
+ on_error : "raise", "omit", "return"
883
+ If raise, an underlying exception will be raised (converted to KeyError
884
+ if the type is in self.missing_exceptions); if omit, keys with exception
885
+ will simply not be included in the output; if "return", all keys are
886
+ included in the output, but the value will be bytes or an exception
887
+ instance.
888
+ kwargs: passed to cat_file
889
+
890
+ Returns
891
+ -------
892
+ dict of {path: contents} if there are multiple paths
893
+ or the path has been otherwise expanded
894
+ """
895
+ paths = self.expand_path(path, recursive=recursive)
896
+ if (
897
+ len(paths) > 1
898
+ or isinstance(path, list)
899
+ or paths[0] != self._strip_protocol(path)
900
+ ):
901
+ out = {}
902
+ for path in paths:
903
+ try:
904
+ out[path] = self.cat_file(path, **kwargs)
905
+ except Exception as e:
906
+ if on_error == "raise":
907
+ raise
908
+ if on_error == "return":
909
+ out[path] = e
910
+ return out
911
+ else:
912
+ return self.cat_file(paths[0], **kwargs)
913
+
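# Return-shape sketch: one concrete path gives bytes back, while lists,
# glob patterns or directories give a {path: bytes} mapping.
import fsspec

fs = fsspec.filesystem("memory")
fs.pipe({"/c/one": b"1", "/c/two": b"2"})
print(fs.cat("/c/one"))              # b'1'
print(fs.cat(["/c/one", "/c/two"]))  # {'/c/one': b'1', '/c/two': b'2'}
print(fs.cat("/c/*"))                # same mapping, via glob expansion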
914
+ def get_file(self, rpath, lpath, callback=DEFAULT_CALLBACK, outfile=None, **kwargs):
915
+ """Copy single remote file to local"""
916
+ from .implementations.local import LocalFileSystem
917
+
918
+ if isfilelike(lpath):
919
+ outfile = lpath
920
+ elif self.isdir(rpath):
921
+ os.makedirs(lpath, exist_ok=True)
922
+ return None
923
+
924
+ fs = LocalFileSystem(auto_mkdir=True)
925
+ fs.makedirs(fs._parent(lpath), exist_ok=True)
926
+
927
+ with self.open(rpath, "rb", **kwargs) as f1:
928
+ if outfile is None:
929
+ outfile = open(lpath, "wb")
930
+
931
+ try:
932
+ callback.set_size(getattr(f1, "size", None))
933
+ data = True
934
+ while data:
935
+ data = f1.read(self.blocksize)
936
+ segment_len = outfile.write(data)
937
+ if segment_len is None:
938
+ segment_len = len(data)
939
+ callback.relative_update(segment_len)
940
+ finally:
941
+ if not isfilelike(lpath):
942
+ outfile.close()
943
+
944
+ def get(
945
+ self,
946
+ rpath,
947
+ lpath,
948
+ recursive=False,
949
+ callback=DEFAULT_CALLBACK,
950
+ maxdepth=None,
951
+ **kwargs,
952
+ ):
953
+ """Copy file(s) to local.
954
+
955
+ Copies a specific file or tree of files (if recursive=True). If lpath
956
+ ends with a "/", it will be assumed to be a directory, and target files
957
+ will go within. Can submit a list of paths, which may be glob-patterns
958
+ and will be expanded.
959
+
960
+ Calls get_file for each source.
961
+ """
962
+ if isinstance(lpath, list) and isinstance(rpath, list):
963
+ # No need to expand paths when both source and destination
964
+ # are provided as lists
965
+ rpaths = rpath
966
+ lpaths = lpath
967
+ else:
968
+ from .implementations.local import (
969
+ LocalFileSystem,
970
+ make_path_posix,
971
+ trailing_sep,
972
+ )
973
+
974
+ source_is_str = isinstance(rpath, str)
975
+ rpaths = self.expand_path(rpath, recursive=recursive, maxdepth=maxdepth)
976
+ if source_is_str and (not recursive or maxdepth is not None):
977
+ # Non-recursive glob does not copy directories
978
+ rpaths = [p for p in rpaths if not (trailing_sep(p) or self.isdir(p))]
979
+ if not rpaths:
980
+ return
981
+
982
+ if isinstance(lpath, str):
983
+ lpath = make_path_posix(lpath)
984
+
985
+ source_is_file = len(rpaths) == 1
986
+ dest_is_dir = isinstance(lpath, str) and (
987
+ trailing_sep(lpath) or LocalFileSystem().isdir(lpath)
988
+ )
989
+
990
+ exists = source_is_str and (
991
+ (has_magic(rpath) and source_is_file)
992
+ or (not has_magic(rpath) and dest_is_dir and not trailing_sep(rpath))
993
+ )
994
+ lpaths = other_paths(
995
+ rpaths,
996
+ lpath,
997
+ exists=exists,
998
+ flatten=not source_is_str,
999
+ )
1000
+
1001
+ callback.set_size(len(lpaths))
1002
+ for lpath, rpath in callback.wrap(zip(lpaths, rpaths)):
1003
+ with callback.branched(rpath, lpath) as child:
1004
+ self.get_file(rpath, lpath, callback=child, **kwargs)
1005
+
1006
+ def put_file(
1007
+ self, lpath, rpath, callback=DEFAULT_CALLBACK, mode="overwrite", **kwargs
1008
+ ):
1009
+ """Copy single file to remote"""
1010
+ if mode == "create" and self.exists(rpath):
1011
+ raise FileExistsError
1012
+ if os.path.isdir(lpath):
1013
+ self.makedirs(rpath, exist_ok=True)
1014
+ return None
1015
+
1016
+ with open(lpath, "rb") as f1:
1017
+ size = f1.seek(0, 2)
1018
+ callback.set_size(size)
1019
+ f1.seek(0)
1020
+
1021
+ self.mkdirs(self._parent(os.fspath(rpath)), exist_ok=True)
1022
+ with self.open(rpath, "wb", **kwargs) as f2:
1023
+ while f1.tell() < size:
1024
+ data = f1.read(self.blocksize)
1025
+ segment_len = f2.write(data)
1026
+ if segment_len is None:
1027
+ segment_len = len(data)
1028
+ callback.relative_update(segment_len)
1029
+
1030
+ def put(
1031
+ self,
1032
+ lpath,
1033
+ rpath,
1034
+ recursive=False,
1035
+ callback=DEFAULT_CALLBACK,
1036
+ maxdepth=None,
1037
+ **kwargs,
1038
+ ):
1039
+ """Copy file(s) from local.
1040
+
1041
+ Copies a specific file or tree of files (if recursive=True). If rpath
1042
+ ends with a "/", it will be assumed to be a directory, and target files
1043
+ will go within.
1044
+
1045
+ Calls put_file for each source.
1046
+ """
1047
+ if isinstance(lpath, list) and isinstance(rpath, list):
1048
+ # No need to expand paths when both source and destination
1049
+ # are provided as lists
1050
+ rpaths = rpath
1051
+ lpaths = lpath
1052
+ else:
1053
+ from .implementations.local import (
1054
+ LocalFileSystem,
1055
+ make_path_posix,
1056
+ trailing_sep,
1057
+ )
1058
+
1059
+ source_is_str = isinstance(lpath, str)
1060
+ if source_is_str:
1061
+ lpath = make_path_posix(lpath)
1062
+ fs = LocalFileSystem()
1063
+ lpaths = fs.expand_path(lpath, recursive=recursive, maxdepth=maxdepth)
1064
+ if source_is_str and (not recursive or maxdepth is not None):
1065
+ # Non-recursive glob does not copy directories
1066
+ lpaths = [p for p in lpaths if not (trailing_sep(p) or fs.isdir(p))]
1067
+ if not lpaths:
1068
+ return
1069
+
1070
+ source_is_file = len(lpaths) == 1
1071
+ dest_is_dir = isinstance(rpath, str) and (
1072
+ trailing_sep(rpath) or self.isdir(rpath)
1073
+ )
1074
+
1075
+ rpath = (
1076
+ self._strip_protocol(rpath)
1077
+ if isinstance(rpath, str)
1078
+ else [self._strip_protocol(p) for p in rpath]
1079
+ )
1080
+ exists = source_is_str and (
1081
+ (has_magic(lpath) and source_is_file)
1082
+ or (not has_magic(lpath) and dest_is_dir and not trailing_sep(lpath))
1083
+ )
1084
+ rpaths = other_paths(
1085
+ lpaths,
1086
+ rpath,
1087
+ exists=exists,
1088
+ flatten=not source_is_str,
1089
+ )
1090
+
1091
+ callback.set_size(len(rpaths))
1092
+ for lpath, rpath in callback.wrap(zip(lpaths, rpaths)):
1093
+ with callback.branched(lpath, rpath) as child:
1094
+ self.put_file(lpath, rpath, callback=child, **kwargs)
1095
+
1096
+ def head(self, path, size=1024):
1097
+ """Get the first ``size`` bytes from file"""
1098
+ with self.open(path, "rb") as f:
1099
+ return f.read(size)
1100
+
1101
+ def tail(self, path, size=1024):
1102
+ """Get the last ``size`` bytes from file"""
1103
+ with self.open(path, "rb") as f:
1104
+ f.seek(max(-size, -f.size), 2)
1105
+ return f.read()
1106
+
1107
+ def cp_file(self, path1, path2, **kwargs):
1108
+ raise NotImplementedError
1109
+
1110
+ def copy(
1111
+ self, path1, path2, recursive=False, maxdepth=None, on_error=None, **kwargs
1112
+ ):
1113
+ """Copy within two locations in the filesystem
1114
+
1115
+ on_error : "raise", "ignore"
1116
+ If raise, any not-found exceptions will be raised; if ignore, any
1117
+ not-found exceptions will cause the path to be skipped; defaults to
1118
+ raise unless recursive is true, where the default is ignore
1119
+ """
1120
+ if on_error is None and recursive:
1121
+ on_error = "ignore"
1122
+ elif on_error is None:
1123
+ on_error = "raise"
1124
+
1125
+ if isinstance(path1, list) and isinstance(path2, list):
1126
+ # No need to expand paths when both source and destination
1127
+ # are provided as lists
1128
+ paths1 = path1
1129
+ paths2 = path2
1130
+ else:
1131
+ from .implementations.local import trailing_sep
1132
+
1133
+ source_is_str = isinstance(path1, str)
1134
+ paths1 = self.expand_path(path1, recursive=recursive, maxdepth=maxdepth)
1135
+ if source_is_str and (not recursive or maxdepth is not None):
1136
+ # Non-recursive glob does not copy directories
1137
+ paths1 = [p for p in paths1 if not (trailing_sep(p) or self.isdir(p))]
1138
+ if not paths1:
1139
+ return
1140
+
1141
+ source_is_file = len(paths1) == 1
1142
+ dest_is_dir = isinstance(path2, str) and (
1143
+ trailing_sep(path2) or self.isdir(path2)
1144
+ )
1145
+
1146
+ exists = source_is_str and (
1147
+ (has_magic(path1) and source_is_file)
1148
+ or (not has_magic(path1) and dest_is_dir and not trailing_sep(path1))
1149
+ )
1150
+ paths2 = other_paths(
1151
+ paths1,
1152
+ path2,
1153
+ exists=exists,
1154
+ flatten=not source_is_str,
1155
+ )
1156
+
1157
+ for p1, p2 in zip(paths1, paths2):
1158
+ try:
1159
+ self.cp_file(p1, p2, **kwargs)
1160
+ except FileNotFoundError:
1161
+ if on_error == "raise":
1162
+ raise
1163
+
1164
+ def expand_path(self, path, recursive=False, maxdepth=None, **kwargs):
1165
+ """Turn one or more globs or directories into a list of all matching paths
1166
+ to files or directories.
1167
+
1168
+ kwargs are passed to ``glob`` or ``find``, which may in turn call ``ls``
1169
+ """
1170
+
1171
+ if maxdepth is not None and maxdepth < 1:
1172
+ raise ValueError("maxdepth must be at least 1")
1173
+
1174
+ if isinstance(path, (str, os.PathLike)):
1175
+ out = self.expand_path([path], recursive, maxdepth)
1176
+ else:
1177
+ out = set()
1178
+ path = [self._strip_protocol(p) for p in path]
1179
+ for p in path:
1180
+ if has_magic(p):
1181
+ bit = set(self.glob(p, maxdepth=maxdepth, **kwargs))
1182
+ out |= bit
1183
+ if recursive:
1184
+ # glob call above expanded one depth so if maxdepth is defined
1185
+ # then decrement it in expand_path call below. If it is zero
1186
+ # after decrementing then avoid expand_path call.
1187
+ if maxdepth is not None and maxdepth <= 1:
1188
+ continue
1189
+ out |= set(
1190
+ self.expand_path(
1191
+ list(bit),
1192
+ recursive=recursive,
1193
+ maxdepth=maxdepth - 1 if maxdepth is not None else None,
1194
+ **kwargs,
1195
+ )
1196
+ )
1197
+ continue
1198
+ elif recursive:
1199
+ rec = set(
1200
+ self.find(
1201
+ p, maxdepth=maxdepth, withdirs=True, detail=False, **kwargs
1202
+ )
1203
+ )
1204
+ out |= rec
1205
+ if p not in out and (recursive is False or self.exists(p)):
1206
+ # should only check once, for the root
1207
+ out.add(p)
1208
+ if not out:
1209
+ raise FileNotFoundError(path)
1210
+ return sorted(out)
1211
+
1212
+ def mv(self, path1, path2, recursive=False, maxdepth=None, **kwargs):
1213
+ """Move file(s) from one location to another"""
1214
+ if path1 == path2:
1215
+ logger.debug("%s mv: The paths are the same, so no files were moved.", self)
1216
+ else:
1217
+ # explicitly raise exception to prevent data corruption
1218
+ self.copy(
1219
+ path1, path2, recursive=recursive, maxdepth=maxdepth, on_error="raise"
1220
+ )
1221
+ self.rm(path1, recursive=recursive)
1222
+
1223
+ def rm_file(self, path):
1224
+ """Delete a file"""
1225
+ self._rm(path)
1226
+
1227
+ def _rm(self, path):
1228
+ """Delete one file"""
1229
+ # this is the old name for the method, prefer rm_file
1230
+ raise NotImplementedError
1231
+
1232
+ def rm(self, path, recursive=False, maxdepth=None):
1233
+ """Delete files.
1234
+
1235
+ Parameters
1236
+ ----------
1237
+ path: str or list of str
1238
+ File(s) to delete.
1239
+ recursive: bool
1240
+ If file(s) are directories, recursively delete contents and then
1241
+ also remove the directory
1242
+ maxdepth: int or None
1243
+ Depth to pass to walk for finding files to delete, if recursive.
1244
+ If None, there will be no limit and infinite recursion may be
1245
+ possible.
1246
+ """
1247
+ path = self.expand_path(path, recursive=recursive, maxdepth=maxdepth)
1248
+ for p in reversed(path):
1249
+ self.rm_file(p)
1250
+
1251
+ @classmethod
1252
+ def _parent(cls, path):
1253
+ path = cls._strip_protocol(path)
1254
+ if "/" in path:
1255
+ parent = path.rsplit("/", 1)[0].lstrip(cls.root_marker)
1256
+ return cls.root_marker + parent
1257
+ else:
1258
+ return cls.root_marker
1259
+
1260
+ def _open(
1261
+ self,
1262
+ path,
1263
+ mode="rb",
1264
+ block_size=None,
1265
+ autocommit=True,
1266
+ cache_options=None,
1267
+ **kwargs,
1268
+ ):
1269
+ """Return raw bytes-mode file-like from the file-system"""
1270
+ return AbstractBufferedFile(
1271
+ self,
1272
+ path,
1273
+ mode,
1274
+ block_size,
1275
+ autocommit,
1276
+ cache_options=cache_options,
1277
+ **kwargs,
1278
+ )
1279
+
1280
+ def open(
1281
+ self,
1282
+ path,
1283
+ mode="rb",
1284
+ block_size=None,
1285
+ cache_options=None,
1286
+ compression=None,
1287
+ **kwargs,
1288
+ ):
1289
+ """
1290
+ Return a file-like object from the filesystem
1291
+
1292
+ The resultant instance must function correctly in a context ``with``
1293
+ block.
1294
+
1295
+ Parameters
1296
+ ----------
1297
+ path: str
1298
+ Target file
1299
+ mode: str like 'rb', 'w'
1300
+ See builtin ``open()``
1301
+ Mode "x" (exclusive write) may be implemented by the backend. Even if
1302
+ it is, whether it is checked up front or on commit, and whether it is
1303
+ atomic is implementation-dependent.
1304
+ block_size: int
1305
+ Some indication of buffering - this is a value in bytes
1306
+ cache_options : dict, optional
1307
+ Extra arguments to pass through to the cache.
1308
+ compression: string or None
1309
+ If given, open file using compression codec. Can either be a compression
1310
+ name (a key in ``fsspec.compression.compr``) or "infer" to guess the
1311
+ compression from the filename suffix.
1312
+ encoding, errors, newline: passed on to TextIOWrapper for text mode
1313
+ """
1314
+ import io
1315
+
1316
+ path = self._strip_protocol(path)
1317
+ if "b" not in mode:
1318
+ mode = mode.replace("t", "") + "b"
1319
+
1320
+ text_kwargs = {
1321
+ k: kwargs.pop(k)
1322
+ for k in ["encoding", "errors", "newline"]
1323
+ if k in kwargs
1324
+ }
1325
+ return io.TextIOWrapper(
1326
+ self.open(
1327
+ path,
1328
+ mode,
1329
+ block_size=block_size,
1330
+ cache_options=cache_options,
1331
+ compression=compression,
1332
+ **kwargs,
1333
+ ),
1334
+ **text_kwargs,
1335
+ )
1336
+ else:
1337
+ ac = kwargs.pop("autocommit", not self._intrans)
1338
+ f = self._open(
1339
+ path,
1340
+ mode=mode,
1341
+ block_size=block_size,
1342
+ autocommit=ac,
1343
+ cache_options=cache_options,
1344
+ **kwargs,
1345
+ )
1346
+ if compression is not None:
1347
+ from fsspec.compression import compr
1348
+ from fsspec.core import get_compression
1349
+
1350
+ compression = get_compression(path, compression)
1351
+ compress = compr[compression]
1352
+ f = compress(f, mode=mode[0])
1353
+
1354
+ if not ac and "r" not in mode:
1355
+ self.transaction.files.append(f)
1356
+ return f
1357
+
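# Mode-handling sketch: text modes are wrapped in io.TextIOWrapper around
# the binary file, and compression="infer" picks a codec from the suffix
# (".gz" -> gzip here).
import fsspec

fs = fsspec.filesystem("memory")
with fs.open("/t/plain.txt", "w", encoding="utf-8") as f:
    f.write("routed through TextIOWrapper")
with fs.open("/t/data.gz", "wb", compression="infer") as f:
    f.write(b"stored gzip-compressed")
with fs.open("/t/data.gz", "rb", compression="infer") as f:
    print(f.read())  # b'stored gzip-compressed'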
1358
+ def touch(self, path, truncate=True, **kwargs):
1359
+ """Create empty file, or update timestamp
1360
+
1361
+ Parameters
1362
+ ----------
1363
+ path: str
1364
+ file location
1365
+ truncate: bool
1366
+ If True, always set file size to 0; if False, update timestamp and
1367
+ leave file unchanged, if backend allows this
1368
+ """
1369
+ if truncate or not self.exists(path):
1370
+ with self.open(path, "wb", **kwargs):
1371
+ pass
1372
+ else:
1373
+ raise NotImplementedError # update timestamp, if possible
1374
+
1375
+ def ukey(self, path):
1376
+ """Hash of file properties, to tell if it has changed"""
1377
+ return sha256(str(self.info(path)).encode()).hexdigest()
1378
+
1379
+ def read_block(self, fn, offset, length, delimiter=None):
1380
+ """Read a block of bytes from
1381
+
1382
+ Starting at ``offset`` of the file, read ``length`` bytes. If
1383
+ ``delimiter`` is set then we ensure that the read starts and stops at
1384
+ delimiter boundaries that follow the locations ``offset`` and ``offset
1385
+ + length``. If ``offset`` is zero then we start at zero. The
1386
+ bytestring returned WILL include the end delimiter string.
1387
+
1388
+ If offset+length is beyond the eof, reads to eof.
1389
+
1390
+ Parameters
1391
+ ----------
1392
+ fn: string
1393
+ Path to filename
1394
+ offset: int
1395
+ Byte offset to start read
1396
+ length: int
1397
+ Number of bytes to read. If None, read to end.
1398
+ delimiter: bytes (optional)
1399
+ Ensure reading starts and stops at delimiter bytestring
1400
+
1401
+ Examples
1402
+ --------
1403
+ >>> fs.read_block('data/file.csv', 0, 13) # doctest: +SKIP
1404
+ b'Alice, 100\\nBo'
1405
+ >>> fs.read_block('data/file.csv', 0, 13, delimiter=b'\\n') # doctest: +SKIP
1406
+ b'Alice, 100\\nBob, 200\\n'
1407
+
1408
+ Use ``length=None`` to read to the end of the file.
1409
+ >>> fs.read_block('data/file.csv', 0, None, delimiter=b'\\n') # doctest: +SKIP
1410
+ b'Alice, 100\\nBob, 200\\nCharlie, 300'
1411
+
1412
+ See Also
1413
+ --------
1414
+ :func:`fsspec.utils.read_block`
1415
+ """
1416
+ with self.open(fn, "rb") as f:
1417
+ size = f.size
1418
+ if length is None:
1419
+ length = size
1420
+ if size is not None and offset + length > size:
1421
+ length = size - offset
1422
+ return read_block(f, offset, length, delimiter)
1423
+
1424
+ def to_json(self, *, include_password: bool = True) -> str:
1425
+ """
1426
+ JSON representation of this filesystem instance.
1427
+
1428
+ Parameters
1429
+ ----------
1430
+ include_password: bool, default True
1431
+ Whether to include the password (if any) in the output.
1432
+
1433
+ Returns
1434
+ -------
1435
+ JSON string with keys ``cls`` (the python location of this class),
1436
+ protocol (text name of this class's protocol, first one in case of
1437
+ multiple), ``args`` (positional args, usually empty), and all other
1438
+ keyword arguments as their own keys.
1439
+
1440
+ Warnings
1441
+ --------
1442
+ Serialized filesystems may contain sensitive information which have been
1443
+ passed to the constructor, such as passwords and tokens. Make sure you
1444
+ store and send them in a secure environment!
1445
+ """
1446
+ from .json import FilesystemJSONEncoder
1447
+
1448
+ return json.dumps(
1449
+ self,
1450
+ cls=type(
1451
+ "_FilesystemJSONEncoder",
1452
+ (FilesystemJSONEncoder,),
1453
+ {"include_password": include_password},
1454
+ ),
1455
+ )
1456
+
1457
+ @staticmethod
1458
+ def from_json(blob: str) -> AbstractFileSystem:
1459
+ """
1460
+ Recreate a filesystem instance from JSON representation.
1461
+
1462
+ See ``.to_json()`` for the expected structure of the input.
1463
+
1464
+ Parameters
1465
+ ----------
1466
+ blob: str
1467
+
1468
+ Returns
1469
+ -------
1470
+ file system instance, not necessarily of this particular class.
1471
+
1472
+ Warnings
1473
+ --------
1474
+ This can import arbitrary modules (as determined by the ``cls`` key).
1475
+ Make sure you haven't installed any modules that may execute malicious code
1476
+ at import time.
1477
+ """
1478
+ from .json import FilesystemJSONDecoder
1479
+
1480
+ return json.loads(blob, cls=FilesystemJSONDecoder)
1481
+
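# Round-trip sketch: to_json() records class, protocol and constructor
# arguments; from_json() rebuilds an equivalent instance (here literally
# the same cached object, thanks to the instance cache).
import fsspec
from fsspec import AbstractFileSystem

fs = fsspec.filesystem("memory")
fs2 = AbstractFileSystem.from_json(fs.to_json())
print(fs == fs2)  # True -- same class and _fs_token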
1482
+ def to_dict(self, *, include_password: bool = True) -> dict[str, Any]:
1483
+ """
1484
+ JSON-serializable dictionary representation of this filesystem instance.
1485
+
1486
+ Parameters
1487
+ ----------
1488
+ include_password: bool, default True
1489
+ Whether to include the password (if any) in the output.
1490
+
1491
+ Returns
1492
+ -------
1493
+ Dictionary with keys ``cls`` (the python location of this class),
1494
+ protocol (text name of this class's protocol, first one in case of
1495
+ multiple), ``args`` (positional args, usually empty), and all other
1496
+ keyword arguments as their own keys.
1497
+
1498
+ Warnings
1499
+ --------
1500
+ Serialized filesystems may contain sensitive information that has been
1501
+ passed to the constructor, such as passwords and tokens. Make sure you
1502
+ store and send them in a secure environment!
1503
+ """
1504
+ from .json import FilesystemJSONEncoder
1505
+
1506
+ json_encoder = FilesystemJSONEncoder()
1507
+
1508
+ cls = type(self)
1509
+ proto = self.protocol
1510
+
1511
+ storage_options = dict(self.storage_options)
1512
+ if not include_password:
1513
+ storage_options.pop("password", None)
1514
+
1515
+ return dict(
1516
+ cls=f"{cls.__module__}:{cls.__name__}",
1517
+ protocol=proto[0] if isinstance(proto, (tuple, list)) else proto,
1518
+ args=json_encoder.make_serializable(self.storage_args),
1519
+ **json_encoder.make_serializable(storage_options),
1520
+ )
1521
+
1522
+ @staticmethod
1523
+ def from_dict(dct: dict[str, Any]) -> AbstractFileSystem:
1524
+ """
1525
+ Recreate a filesystem instance from dictionary representation.
1526
+
1527
+ See ``.to_dict()`` for the expected structure of the input.
1528
+
1529
+ Parameters
1530
+ ----------
1531
+ dct: Dict[str, Any]
1532
+
1533
+ Returns
1534
+ -------
1535
+ file system instance, not necessarily of this particular class.
1536
+
1537
+ Warnings
1538
+ --------
1539
+ This can import arbitrary modules (as determined by the ``cls`` key).
1540
+ Make sure you haven't installed any modules that may execute malicious code
1541
+ at import time.
1542
+ """
1543
+ from .json import FilesystemJSONDecoder
1544
+
1545
+ json_decoder = FilesystemJSONDecoder()
1546
+
1547
+ dct = dict(dct) # Defensive copy
1548
+
1549
+ cls = FilesystemJSONDecoder.try_resolve_fs_cls(dct)
1550
+ if cls is None:
1551
+ raise ValueError("Not a serialized AbstractFileSystem")
1552
+
1553
+ dct.pop("cls", None)
1554
+ dct.pop("protocol", None)
1555
+
1556
+ return cls(
1557
+ *json_decoder.unmake_serializable(dct.pop("args", ())),
1558
+ **json_decoder.unmake_serializable(dct),
1559
+ )
1560
+
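+ # Illustrative sketch: redacting credentials before serializing, using the
+ # same public FTP test server as the ``tree`` example below:
+ #
+ #     import fsspec
+ #     fs = fsspec.filesystem("ftp", host="test.rebex.net", user="demo", password="password")
+ #     safe = fs.to_dict(include_password=False)  # no "password" key
+ #     full = fs.to_dict()                        # includes "password"
+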
1561
+ def _get_pyarrow_filesystem(self):
1562
+ """
1563
+ Make a version of the FS instance which will be acceptable to pyarrow
1564
+ """
1565
+ # all instances already also derive from pyarrow
1566
+ return self
1567
+
1568
+ def get_mapper(self, root="", check=False, create=False, missing_exceptions=None):
1569
+ """Create key/value store based on this file-system
1570
+
1571
+ Makes a MutableMapping interface to the FS at the given root path.
1572
+ See ``fsspec.mapping.FSMap`` for further details.
1573
+ """
1574
+ from .mapping import FSMap
1575
+
1576
+ return FSMap(
1577
+ root,
1578
+ self,
1579
+ check=check,
1580
+ create=create,
1581
+ missing_exceptions=missing_exceptions,
1582
+ )
1583
+
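+ # Illustrative sketch: the mapper behaves as a MutableMapping whose keys
+ # are paths under ``root``:
+ #
+ #     import fsspec
+ #     fs = fsspec.filesystem("memory")
+ #     m = fs.get_mapper("/store")
+ #     m["key"] = b"value"          # writes /store/key
+ #     assert m["key"] == b"value"
+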
1584
+ @classmethod
1585
+ def clear_instance_cache(cls):
1586
+ """
1587
+ Clear the cache of filesystem instances.
1588
+
1589
+ Notes
1590
+ -----
1591
+ Unless overridden by setting the ``cachable`` class attribute to False,
1592
+ the filesystem class stores a reference to newly created instances. This
1593
+ prevents Python's normal rules around garbage collection from working,
1594
+ since the instances refcount will not drop to zero until
1595
+ ``clear_instance_cache`` is called.
1596
+ """
1597
+ cls._cache.clear()
1598
+
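+ # Illustrative sketch of the caching behaviour described above: identical
+ # constructor arguments yield the same object until the class cache is
+ # cleared:
+ #
+ #     from fsspec.implementations.memory import MemoryFileSystem
+ #     a = MemoryFileSystem()
+ #     assert MemoryFileSystem() is a
+ #     MemoryFileSystem.clear_instance_cache()
+ #     assert MemoryFileSystem() is not a
+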
1599
+ def created(self, path):
1600
+ """Return the created timestamp of a file as a datetime.datetime"""
1601
+ raise NotImplementedError
1602
+
1603
+ def modified(self, path):
1604
+ """Return the modified timestamp of a file as a datetime.datetime"""
1605
+ raise NotImplementedError
1606
+
1607
+ def tree(
1608
+ self,
1609
+ path: str = "/",
1610
+ recursion_limit: int = 2,
1611
+ max_display: int = 25,
1612
+ display_size: bool = False,
1613
+ prefix: str = "",
1614
+ is_last: bool = True,
1615
+ first: bool = True,
1616
+ indent_size: int = 4,
1617
+ ) -> str:
1618
+ """
1619
+ Return a tree-like structure of the filesystem starting from the given path as a string.
1620
+
1621
+ Parameters
1622
+ ----------
1623
+ path: Root path to start traversal from
1624
+ recursion_limit: Maximum depth of directory traversal
1625
+ max_display: Maximum number of items to display per directory
1626
+ display_size: Whether to display file sizes
1627
+ prefix: Current line prefix for visual tree structure
1628
+ is_last: Whether current item is last in its level
1629
+ first: Whether this is the first call (displays root path)
1630
+ indent_size: Number of spaces per indent level
1631
+
1632
+ Returns
1633
+ -------
1634
+ str: A string representing the tree structure.
1635
+
1636
+ Example
1637
+ -------
1638
+ >>> from fsspec import filesystem
1639
+
1640
+ >>> fs = filesystem('ftp', host='test.rebex.net', user='demo', password='password')
1641
+ >>> tree = fs.tree(display_size=True, recursion_limit=3, indent_size=8, max_display=10)
1642
+ >>> print(tree)
1643
+ """
1644
+
1645
+ def format_bytes(n: int) -> str:
1646
+ """Format bytes as text."""
1647
+ for prefix, k in (
1648
+ ("P", 2**50),
1649
+ ("T", 2**40),
1650
+ ("G", 2**30),
1651
+ ("M", 2**20),
1652
+ ("k", 2**10),
1653
+ ):
1654
+ if n >= 0.9 * k:
1655
+ return f"{n / k:.2f} {prefix}b"
1656
+ return f"{n}B"
1657
+
1658
+ result = []
1659
+
1660
+ if first:
1661
+ result.append(path)
1662
+
1663
+ if recursion_limit:
1664
+ indent = " " * indent_size
1665
+ contents = self.ls(path, detail=True)
1666
+ contents.sort(
1667
+ key=lambda x: (x.get("type") != "directory", x.get("name", ""))
1668
+ )
1669
+
1670
+ if max_display is not None and len(contents) > max_display:
1671
+ displayed_contents = contents[:max_display]
1672
+ remaining_count = len(contents) - max_display
1673
+ else:
1674
+ displayed_contents = contents
1675
+ remaining_count = 0
1676
+
1677
+ for i, item in enumerate(displayed_contents):
1678
+ is_last_item = (i == len(displayed_contents) - 1) and (
1679
+ remaining_count == 0
1680
+ )
1681
+
1682
+ branch = (
1683
+ "└" + ("─" * (indent_size - 2))
1684
+ if is_last_item
1685
+ else "├" + ("─" * (indent_size - 2))
1686
+ )
1687
+ branch += " "
1688
+ new_prefix = prefix + (
1689
+ indent if is_last_item else "│" + " " * (indent_size - 1)
1690
+ )
1691
+
1692
+ name = os.path.basename(item.get("name", ""))
1693
+
1694
+ if display_size and item.get("type") == "directory":
1695
+ sub_contents = self.ls(item.get("name", ""), detail=True)
1696
+ num_files = sum(
1697
+ 1 for sub_item in sub_contents if sub_item.get("type") == "file"
1698
+ )
1699
+ num_folders = sum(
1700
+ 1
1701
+ for sub_item in sub_contents
1702
+ if sub_item.get("type") == "directory"
1703
+ )
1704
+
1705
+ if num_files == 0 and num_folders == 0:
1706
+ size = " (empty folder)"
1707
+ elif num_files == 0:
1708
+ size = f" ({num_folders} subfolder{'s' if num_folders > 1 else ''})"
1709
+ elif num_folders == 0:
1710
+ size = f" ({num_files} file{'s' if num_files > 1 else ''})"
1711
+ else:
1712
+ size = f" ({num_files} file{'s' if num_files > 1 else ''}, {num_folders} subfolder{'s' if num_folders > 1 else ''})"
1713
+ elif display_size and item.get("type") == "file":
1714
+ size = f" ({format_bytes(item.get('size', 0))})"
1715
+ else:
1716
+ size = ""
1717
+
1718
+ result.append(f"{prefix}{branch}{name}{size}")
1719
+
1720
+ if item.get("type") == "directory" and recursion_limit > 0:
1721
+ result.append(
1722
+ self.tree(
1723
+ path=item.get("name", ""),
1724
+ recursion_limit=recursion_limit - 1,
1725
+ max_display=max_display,
1726
+ display_size=display_size,
1727
+ prefix=new_prefix,
1728
+ is_last=is_last_item,
1729
+ first=False,
1730
+ indent_size=indent_size,
1731
+ )
1732
+ )
1733
+
1734
+ if remaining_count > 0:
1735
+ more_message = f"{remaining_count} more item(s) not displayed."
1736
+ result.append(
1737
+ f"{prefix}{'└' + ('─' * (indent_size - 2))} {more_message}"
1738
+ )
1739
+
1740
+ return "\n".join(_ for _ in result if _)
1741
+
1742
+ # ------------------------------------------------------------------------
1743
+ # Aliases
1744
+
1745
+ def read_bytes(self, path, start=None, end=None, **kwargs):
1746
+ """Alias of `AbstractFileSystem.cat_file`."""
1747
+ return self.cat_file(path, start=start, end=end, **kwargs)
1748
+
1749
+ def write_bytes(self, path, value, **kwargs):
1750
+ """Alias of `AbstractFileSystem.pipe_file`."""
1751
+ self.pipe_file(path, value, **kwargs)
1752
+
1753
+ def makedir(self, path, create_parents=True, **kwargs):
1754
+ """Alias of `AbstractFileSystem.mkdir`."""
1755
+ return self.mkdir(path, create_parents=create_parents, **kwargs)
1756
+
1757
+ def mkdirs(self, path, exist_ok=False):
1758
+ """Alias of `AbstractFileSystem.makedirs`."""
1759
+ return self.makedirs(path, exist_ok=exist_ok)
1760
+
1761
+ def listdir(self, path, detail=True, **kwargs):
1762
+ """Alias of `AbstractFileSystem.ls`."""
1763
+ return self.ls(path, detail=detail, **kwargs)
1764
+
1765
+ def cp(self, path1, path2, **kwargs):
1766
+ """Alias of `AbstractFileSystem.copy`."""
1767
+ return self.copy(path1, path2, **kwargs)
1768
+
1769
+ def move(self, path1, path2, **kwargs):
1770
+ """Alias of `AbstractFileSystem.mv`."""
1771
+ return self.mv(path1, path2, **kwargs)
1772
+
1773
+ def stat(self, path, **kwargs):
1774
+ """Alias of `AbstractFileSystem.info`."""
1775
+ return self.info(path, **kwargs)
1776
+
1777
+ def disk_usage(self, path, total=True, maxdepth=None, **kwargs):
1778
+ """Alias of `AbstractFileSystem.du`."""
1779
+ return self.du(path, total=total, maxdepth=maxdepth, **kwargs)
1780
+
1781
+ def rename(self, path1, path2, **kwargs):
1782
+ """Alias of `AbstractFileSystem.mv`."""
1783
+ return self.mv(path1, path2, **kwargs)
1784
+
1785
+ def delete(self, path, recursive=False, maxdepth=None):
1786
+ """Alias of `AbstractFileSystem.rm`."""
1787
+ return self.rm(path, recursive=recursive, maxdepth=maxdepth)
1788
+
1789
+ def upload(self, lpath, rpath, recursive=False, **kwargs):
1790
+ """Alias of `AbstractFileSystem.put`."""
1791
+ return self.put(lpath, rpath, recursive=recursive, **kwargs)
1792
+
1793
+ def download(self, rpath, lpath, recursive=False, **kwargs):
1794
+ """Alias of `AbstractFileSystem.get`."""
1795
+ return self.get(rpath, lpath, recursive=recursive, **kwargs)
1796
+
1797
+ def sign(self, path, expiration=100, **kwargs):
1798
+ """Create a signed URL representing the given path
1799
+
1800
+ Some implementations allow temporary URLs to be generated, as a
1801
+ way of delegating credentials.
1802
+
1803
+ Parameters
1804
+ ----------
1805
+ path : str
1806
+ The path on the filesystem
1807
+ expiration : int
1808
+ Number of seconds to enable the URL for (if supported)
1809
+
1810
+ Returns
1811
+ -------
1812
+ URL : str
1813
+ The signed URL
1814
+
1815
+ Raises
1816
+ ------
1817
+ NotImplementedError : if method is not implemented for a filesystem
1818
+ """
1819
+ raise NotImplementedError("Sign is not implemented for this filesystem")
1820
+
1821
+ def _isfilestore(self):
1822
+ # Originally inherited from pyarrow DaskFileSystem. Keeping this
1823
+ # here for backwards compatibility as long as pyarrow uses its
1824
+ # legacy fsspec-compatible filesystems and thus accepts fsspec
1825
+ # filesystems as well
1826
+ return False
1827
+
1828
+
1829
+ class AbstractBufferedFile(io.IOBase):
1830
+ """Convenient class to derive from to provide buffering
1831
+
1832
+ In the case that the backend does not provide a pythonic file-like object
1833
+ already, this class contains much of the logic to build one. The only
1834
+ methods that need to be overridden are ``_upload_chunk``,
1835
+ ``_initiate_upload`` and ``_fetch_range``.
1836
+ """
1837
+
1838
+ DEFAULT_BLOCK_SIZE = 5 * 2**20
1839
+ _details = None
1840
+
1841
+ def __init__(
1842
+ self,
1843
+ fs,
1844
+ path,
1845
+ mode="rb",
1846
+ block_size="default",
1847
+ autocommit=True,
1848
+ cache_type="readahead",
1849
+ cache_options=None,
1850
+ size=None,
1851
+ **kwargs,
1852
+ ):
1853
+ """
1854
+ Template for files with buffered reading and writing
1855
+
1856
+ Parameters
1857
+ ----------
1858
+ fs: instance of FileSystem
1859
+ path: str
1860
+ location in file-system
1861
+ mode: str
1862
+ Normal file modes. Currently only 'rb', 'wb', 'ab' or 'xb'. Some file
1863
+ systems may be read-only, and some may not support append.
1864
+ block_size: int
1865
+ Buffer size for reading or writing, 'default' for class default
1866
+ autocommit: bool
1867
+ Whether to write to final destination; may only impact what
1868
+ happens when file is being closed.
1869
+ cache_type: {"readahead", "none", "mmap", "bytes"}, default "readahead"
1870
+ Caching policy in read mode. See the definitions in ``core``.
1871
+ cache_options : dict
1872
+ Additional options passed to the constructor for the cache specified
1873
+ by `cache_type`.
1874
+ size: int
1875
+ If given and in read mode, suppresses the need to look up the file size
1876
+ kwargs:
1877
+ Gets stored as self.kwargs
1878
+ """
1879
+ from .core import caches
1880
+
1881
+ self.path = path
1882
+ self.fs = fs
1883
+ self.mode = mode
1884
+ self.blocksize = (
1885
+ self.DEFAULT_BLOCK_SIZE if block_size in ["default", None] else block_size
1886
+ )
1887
+ self.loc = 0
1888
+ self.autocommit = autocommit
1889
+ self.end = None
1890
+ self.start = None
1891
+ self.closed = False
1892
+
1893
+ if cache_options is None:
1894
+ cache_options = {}
1895
+
1896
+ if "trim" in kwargs:
1897
+ warnings.warn(
1898
+ "Passing 'trim' to control the cache behavior has been deprecated. "
1899
+ "Specify it within the 'cache_options' argument instead.",
1900
+ FutureWarning,
1901
+ )
1902
+ cache_options["trim"] = kwargs.pop("trim")
1903
+
1904
+ self.kwargs = kwargs
1905
+
1906
+ if mode not in {"ab", "rb", "wb", "xb"}:
1907
+ raise NotImplementedError("File mode not supported")
1908
+ if mode == "rb":
1909
+ if size is not None:
1910
+ self.size = size
1911
+ else:
1912
+ self.size = self.details["size"]
1913
+ self.cache = caches[cache_type](
1914
+ self.blocksize, self._fetch_range, self.size, **cache_options
1915
+ )
1916
+ else:
1917
+ self.buffer = io.BytesIO()
1918
+ self.offset = None
1919
+ self.forced = False
1920
+ self.location = None
1921
+
1922
+ @property
1923
+ def details(self):
1924
+ if self._details is None:
1925
+ self._details = self.fs.info(self.path)
1926
+ return self._details
1927
+
1928
+ @details.setter
1929
+ def details(self, value):
1930
+ self._details = value
1931
+ self.size = value["size"]
1932
+
1933
+ @property
1934
+ def full_name(self):
1935
+ return _unstrip_protocol(self.path, self.fs)
1936
+
1937
+ @property
1938
+ def closed(self):
1939
+ # get around this attr being read-only in IOBase
1940
+ # use getattr here, since this can be called during del
1941
+ return getattr(self, "_closed", True)
1942
+
1943
+ @closed.setter
1944
+ def closed(self, c):
1945
+ self._closed = c
1946
+
1947
+ def __hash__(self):
1948
+ if "w" in self.mode:
1949
+ return id(self)
1950
+ else:
1951
+ return int(tokenize(self.details), 16)
1952
+
1953
+ def __eq__(self, other):
1954
+ """Files are equal if they have the same checksum, only in read mode"""
1955
+ if self is other:
1956
+ return True
1957
+ return (
1958
+ isinstance(other, type(self))
1959
+ and self.mode == "rb"
1960
+ and other.mode == "rb"
1961
+ and hash(self) == hash(other)
1962
+ )
1963
+
1964
+ def commit(self):
1965
+ """Move from temp to final destination"""
1966
+
1967
+ def discard(self):
1968
+ """Throw away temporary file"""
1969
+
1970
+ def info(self):
1971
+ """File information about this path"""
1972
+ if self.readable():
1973
+ return self.details
1974
+ else:
1975
+ raise ValueError("Info not available while writing")
1976
+
1977
+ def tell(self):
1978
+ """Current file location"""
1979
+ return self.loc
1980
+
1981
+ def seek(self, loc, whence=0):
1982
+ """Set current file location
1983
+
1984
+ Parameters
1985
+ ----------
1986
+ loc: int
1987
+ byte location
1988
+ whence: {0, 1, 2}
1989
+ from start of file, current location or end of file, resp.
1990
+ """
1991
+ loc = int(loc)
1992
+ if not self.mode == "rb":
1993
+ raise OSError(ESPIPE, "Seek only available in read mode")
1994
+ if whence == 0:
1995
+ nloc = loc
1996
+ elif whence == 1:
1997
+ nloc = self.loc + loc
1998
+ elif whence == 2:
1999
+ nloc = self.size + loc
2000
+ else:
2001
+ raise ValueError(f"invalid whence ({whence}, should be 0, 1 or 2)")
2002
+ if nloc < 0:
2003
+ raise ValueError("Seek before start of file")
2004
+ self.loc = nloc
2005
+ return self.loc
2006
+
2007
+ def write(self, data):
2008
+ """
2009
+ Write data to buffer.
2010
+
2011
+ Buffer only sent on flush() or if buffer is greater than
2012
+ or equal to blocksize.
2013
+
2014
+ Parameters
2015
+ ----------
2016
+ data: bytes
2017
+ Set of bytes to be written.
2018
+ """
2019
+ if not self.writable():
2020
+ raise ValueError("File not in write mode")
2021
+ if self.closed:
2022
+ raise ValueError("I/O operation on closed file.")
2023
+ if self.forced:
2024
+ raise ValueError("This file has been force-flushed, can only close")
2025
+ out = self.buffer.write(data)
2026
+ self.loc += out
2027
+ if self.buffer.tell() >= self.blocksize:
2028
+ self.flush()
2029
+ return out
2030
+
2031
+ def flush(self, force=False):
2032
+ """
2033
+ Write buffered data to backend store.
2034
+
2035
+ Writes the current buffer, if it is larger than the block-size, or if
2036
+ the file is being closed.
2037
+
2038
+ Parameters
2039
+ ----------
2040
+ force: bool
2041
+ When closing, write the last block even if it is smaller than
2042
+ blocks are allowed to be. Disallows further writing to this file.
2043
+ """
2044
+
2045
+ if self.closed:
2046
+ raise ValueError("Flush on closed file")
2047
+ if force and self.forced:
2048
+ raise ValueError("Force flush cannot be called more than once")
2049
+ if force:
2050
+ self.forced = True
2051
+
2052
+ if self.readable():
2053
+ # no-op to flush on read-mode
2054
+ return
2055
+
2056
+ if not force and self.buffer.tell() < self.blocksize:
2057
+ # Defer write on small block
2058
+ return
2059
+
2060
+ if self.offset is None:
2061
+ # Initialize a multipart upload
2062
+ self.offset = 0
2063
+ try:
2064
+ self._initiate_upload()
2065
+ except:
2066
+ self.closed = True
2067
+ raise
2068
+
2069
+ if self._upload_chunk(final=force) is not False:
2070
+ self.offset += self.buffer.seek(0, 2)
2071
+ self.buffer = io.BytesIO()
2072
+
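+ # Illustrative sketch: writes below the block size only accumulate in the
+ # buffer; reaching the block size (or closing the file) triggers an upload
+ # chunk. Assuming ``fs`` is a remote filesystem instance with a writable
+ # path:
+ #
+ #     with fs.open("bucket/key", "wb", block_size=5 * 2**20) as f:
+ #         f.write(b"small")            # buffered, nothing sent yet
+ #         f.write(b"x" * (6 * 2**20))  # >= blocksize, chunk is uploaded
+ #     # close() force-flushes the remainder and finalizes the upload
+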
2073
+ def _upload_chunk(self, final=False):
2074
+ """Write one part of a multi-block file upload
2075
+
2076
+ Parameters
2077
+ ==========
2078
+ final: bool
2079
+ This is the last block, so should complete file, if
2080
+ self.autocommit is True.
2081
+ """
2082
+ # may not yet have been initialized, may need to call _initialize_upload
2083
+
2084
+ def _initiate_upload(self):
2085
+ """Create remote file/upload"""
2086
+ pass
2087
+
2088
+ def _fetch_range(self, start, end):
2089
+ """Get the specified set of bytes from remote"""
2090
+ return self.fs.cat_file(self.path, start=start, end=end)
2091
+
2092
+ def read(self, length=-1):
2093
+ """
2094
+ Return data from cache, or fetch pieces as necessary
2095
+
2096
+ Parameters
2097
+ ----------
2098
+ length: int (-1)
2099
+ Number of bytes to read; if <0, all remaining bytes.
2100
+ """
2101
+ length = -1 if length is None else int(length)
2102
+ if self.mode != "rb":
2103
+ raise ValueError("File not in read mode")
2104
+ if length < 0:
2105
+ length = self.size - self.loc
2106
+ if self.closed:
2107
+ raise ValueError("I/O operation on closed file.")
2108
+ if length == 0:
2109
+ # don't even bother calling fetch
2110
+ return b""
2111
+ out = self.cache._fetch(self.loc, self.loc + length)
2112
+
2113
+ logger.debug(
2114
+ "%s read: %i - %i %s",
2115
+ self,
2116
+ self.loc,
2117
+ self.loc + length,
2118
+ self.cache._log_stats(),
2119
+ )
2120
+ self.loc += len(out)
2121
+ return out
2122
+
2123
+ def readinto(self, b):
2124
+ """mirrors builtin file's readinto method
2125
+
2126
+ https://docs.python.org/3/library/io.html#io.RawIOBase.readinto
2127
+ """
2128
+ out = memoryview(b).cast("B")
2129
+ data = self.read(out.nbytes)
2130
+ out[: len(data)] = data
2131
+ return len(data)
2132
+
2133
+ def readuntil(self, char=b"\n", blocks=None):
2134
+ """Return data between current position and first occurrence of char
2135
+
2136
+ char is included in the output, except if the end of the tile is
2137
+ encountered first.
2138
+
2139
+ Parameters
2140
+ ----------
2141
+ char: bytes
2142
+ Thing to find
2143
+ blocks: None or int
2144
+ How much to read in each go. Defaults to file blocksize - which may
2145
+ mean a new read on every call.
2146
+ """
2147
+ out = []
2148
+ while True:
2149
+ start = self.tell()
2150
+ part = self.read(blocks or self.blocksize)
2151
+ if len(part) == 0:
2152
+ break
2153
+ found = part.find(char)
2154
+ if found > -1:
2155
+ out.append(part[: found + len(char)])
2156
+ self.seek(start + found + len(char))
2157
+ break
2158
+ out.append(part)
2159
+ return b"".join(out)
2160
+
2161
+ def readline(self):
2162
+ """Read until and including the first occurrence of newline character
2163
+
2164
+ Note that, because of character encoding, this is not necessarily a
2165
+ true line ending.
2166
+ """
2167
+ return self.readuntil(b"\n")
2168
+
2169
+ def __next__(self):
2170
+ out = self.readline()
2171
+ if out:
2172
+ return out
2173
+ raise StopIteration
2174
+
2175
+ def __iter__(self):
2176
+ return self
2177
+
2178
+ def readlines(self):
2179
+ """Return all data, split by the newline character, including the newline character"""
2180
+ data = self.read()
2181
+ lines = data.split(b"\n")
2182
+ out = [l + b"\n" for l in lines[:-1]]
2183
+ if data.endswith(b"\n"):
2184
+ return out
2185
+ else:
2186
+ return out + [lines[-1]]
2187
+ # return list(self) ???
2188
+
2189
+ def readinto1(self, b):
2190
+ return self.readinto(b)
2191
+
2192
+ def close(self):
2193
+ """Close file
2194
+
2195
+ Finalizes writes, discards cache
2196
+ """
2197
+ if getattr(self, "_unclosable", False):
2198
+ return
2199
+ if self.closed:
2200
+ return
2201
+ try:
2202
+ if self.mode == "rb":
2203
+ self.cache = None
2204
+ else:
2205
+ if not self.forced:
2206
+ self.flush(force=True)
2207
+
2208
+ if self.fs is not None:
2209
+ self.fs.invalidate_cache(self.path)
2210
+ self.fs.invalidate_cache(self.fs._parent(self.path))
2211
+ finally:
2212
+ self.closed = True
2213
+
2214
+ def readable(self):
2215
+ """Whether opened for reading"""
2216
+ return "r" in self.mode and not self.closed
2217
+
2218
+ def seekable(self):
2219
+ """Whether is seekable (only in read mode)"""
2220
+ return self.readable()
2221
+
2222
+ def writable(self):
2223
+ """Whether opened for writing"""
2224
+ return self.mode in {"wb", "ab", "xb"} and not self.closed
2225
+
2226
+ def __reduce__(self):
2227
+ if self.mode != "rb":
2228
+ raise RuntimeError("Pickling a writeable file is not supported")
2229
+
2230
+ return reopen, (
2231
+ self.fs,
2232
+ self.path,
2233
+ self.mode,
2234
+ self.blocksize,
2235
+ self.loc,
2236
+ self.size,
2237
+ self.autocommit,
2238
+ self.cache.name if self.cache else "none",
2239
+ self.kwargs,
2240
+ )
2241
+
2242
+ def __del__(self):
2243
+ if not self.closed:
2244
+ self.close()
2245
+
2246
+ def __str__(self):
2247
+ return f"<File-like object {type(self.fs).__name__}, {self.path}>"
2248
+
2249
+ __repr__ = __str__
2250
+
2251
+ def __enter__(self):
2252
+ return self
2253
+
2254
+ def __exit__(self, *args):
2255
+ self.close()
2256
+
2257
+
2258
+ def reopen(fs, path, mode, blocksize, loc, size, autocommit, cache_type, kwargs):
2259
+ file = fs.open(
2260
+ path,
2261
+ mode=mode,
2262
+ block_size=blocksize,
2263
+ autocommit=autocommit,
2264
+ cache_type=cache_type,
2265
+ size=size,
2266
+ **kwargs,
2267
+ )
2268
+ if loc > 0:
2269
+ file.seek(loc)
2270
+ return file
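
As the AbstractBufferedFile docstring above says, a backend only has to supply
``_upload_chunk``, ``_initiate_upload`` and ``_fetch_range``. A minimal sketch
against a plain dict backend (the ``store`` and ``DictFile`` names are
illustrative, not part of fsspec):

from fsspec.spec import AbstractBufferedFile

store = {}  # hypothetical backend: path -> bytes


class DictFile(AbstractBufferedFile):
    """Buffered file over a plain dict, for illustration only."""

    def _initiate_upload(self):
        # called once, before the first chunk is uploaded
        self._parts = []

    def _upload_chunk(self, final=False):
        # persist whatever accumulated in self.buffer; on the final chunk,
        # assemble the full object in the backend
        self.buffer.seek(0)
        self._parts.append(self.buffer.read())
        if final:
            store[self.path] = b"".join(self._parts)
        return True

    def _fetch_range(self, start, end):
        # read mode: serve the requested byte range from the backend
        return store[self.path][start:end]


with DictFile(None, "key", mode="wb") as f:
    f.write(b"hello world")

f = DictFile(None, "key", mode="rb", size=len(store["key"]))
assert f.read() == b"hello world"

Passing ``size`` in read mode avoids the ``details`` lookup, which would
otherwise require a real filesystem instance instead of ``None``.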
.venv/lib/python3.13/site-packages/fsspec/transaction.py ADDED
@@ -0,0 +1,90 @@
1
+ from collections import deque
2
+
3
+
4
+ class Transaction:
5
+ """Filesystem transaction write context
6
+
7
+ Gathers files for deferred commit or discard, so that several write
8
+ operations can be finalized semi-atomically. This works by having this
9
+ instance as the ``.transaction`` attribute of the given filesystem.
10
+ """
11
+
12
+ def __init__(self, fs, **kwargs):
13
+ """
14
+ Parameters
15
+ ----------
16
+ fs: FileSystem instance
17
+ """
18
+ self.fs = fs
19
+ self.files = deque()
20
+
21
+ def __enter__(self):
22
+ self.start()
23
+ return self
24
+
25
+ def __exit__(self, exc_type, exc_val, exc_tb):
26
+ """End transaction and commit, if exit is not due to exception"""
27
+ # only commit if there was no exception
28
+ self.complete(commit=exc_type is None)
29
+ if self.fs:
30
+ self.fs._intrans = False
31
+ self.fs._transaction = None
32
+ self.fs = None
33
+
34
+ def start(self):
35
+ """Start a transaction on this FileSystem"""
36
+ self.files = deque() # clean up after previous failed completions
37
+ self.fs._intrans = True
38
+
39
+ def complete(self, commit=True):
40
+ """Finish transaction: commit or discard all deferred files"""
41
+ while self.files:
42
+ f = self.files.popleft()
43
+ if commit:
44
+ f.commit()
45
+ else:
46
+ f.discard()
47
+ self.fs._intrans = False
48
+ self.fs._transaction = None
49
+ self.fs = None
50
+
51
+
52
+ class FileActor:
53
+ def __init__(self):
54
+ self.files = []
55
+
56
+ def commit(self):
57
+ for f in self.files:
58
+ f.commit()
59
+ self.files.clear()
60
+
61
+ def discard(self):
62
+ for f in self.files:
63
+ f.discard()
64
+ self.files.clear()
65
+
66
+ def append(self, f):
67
+ self.files.append(f)
68
+
69
+
70
+ class DaskTransaction(Transaction):
71
+ def __init__(self, fs):
72
+ """
73
+ Parameters
74
+ ----------
75
+ fs: FileSystem instance
76
+ """
77
+ import distributed
78
+
79
+ super().__init__(fs)
80
+ client = distributed.default_client()
81
+ self.files = client.submit(FileActor, actor=True).result()
82
+
83
+ def complete(self, commit=True):
84
+ """Finish transaction: commit or discard all deferred files"""
85
+ if commit:
86
+ self.files.commit().result()
87
+ else:
88
+ self.files.discard().result()
89
+ self.fs._intrans = False
90
+ self.fs = None
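
For context, the Transaction above is normally driven through the
filesystem's ``transaction`` context manager: files written with
``autocommit=False`` inside the block are committed together on a clean exit
and discarded if an exception escapes. A minimal sketch on the memory
filesystem:

import fsspec

fs = fsspec.filesystem("memory")
with fs.transaction:
    with fs.open("/a.txt", "wb") as f:
        f.write(b"first")
    with fs.open("/b.txt", "wb") as f:
        f.write(b"second")
# on exit, both files were committed together; an exception inside the
# block would have discarded them instead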
.venv/lib/python3.13/site-packages/hf_xet-1.1.5.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
1
+ uv
.venv/lib/python3.13/site-packages/hf_xet-1.1.5.dist-info/METADATA ADDED
@@ -0,0 +1,23 @@
1
+ Metadata-Version: 2.4
2
+ Name: hf-xet
3
+ Version: 1.1.5
4
+ Classifier: Programming Language :: Rust
5
+ Classifier: Programming Language :: Python :: Implementation :: CPython
6
+ Classifier: Programming Language :: Python :: Implementation :: PyPy
7
+ Requires-Dist: pytest ; extra == 'tests'
8
+ Provides-Extra: tests
9
+ License-File: LICENSE
10
+ Summary: Fast transfer of large files with the Hugging Face Hub.
11
+ License: Apache-2.0
12
+ Requires-Python: >=3.8
13
+ Description-Content-Type: text/markdown; charset=UTF-8; variant=GFM
14
+ Project-URL: Homepage, https://github.com/huggingface/xet-core
15
+ Project-URL: Documentation, https://huggingface.co/docs/hub/en/storage-backends#using-xet-storage
16
+ Project-URL: Issues, https://github.com/huggingface/xet-core/issues
17
+ Project-URL: Repository, https://github.com/huggingface/xet-core.git
18
+
19
+ # Development Notes
20
+
21
+ * `pip install maturin`
22
+ * from this directory: `maturin develop`
23
+
.venv/lib/python3.13/site-packages/hf_xet-1.1.5.dist-info/RECORD ADDED
@@ -0,0 +1,8 @@
1
+ hf_xet-1.1.5.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2
2
+ hf_xet-1.1.5.dist-info/METADATA,sha256=jFVsIrpX9kbs_VbMB9_GJtWfiAPSmAB1mR_05mJCQQ8,879
3
+ hf_xet-1.1.5.dist-info/RECORD,,
4
+ hf_xet-1.1.5.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
5
+ hf_xet-1.1.5.dist-info/WHEEL,sha256=UOOOi1PZxNVeqtcDwJVS3yrB4PgYf3v1e9aVmom1HlQ,102
6
+ hf_xet-1.1.5.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
7
+ hf_xet/__init__.py,sha256=E8UDdyQ8glZ_nve9hHEf22bPang8-RKx4VuApXYeQUo,107
8
+ hf_xet/hf_xet.abi3.so,sha256=2qK_JJVpRZgvXz1vRtMxN6o08JaOFCOFpQEH4IYmrIo,5469152
.venv/lib/python3.13/site-packages/hf_xet-1.1.5.dist-info/REQUESTED ADDED
File without changes
.venv/lib/python3.13/site-packages/hf_xet-1.1.5.dist-info/WHEEL ADDED
@@ -0,0 +1,4 @@
1
+ Wheel-Version: 1.0
2
+ Generator: maturin (1.8.7)
3
+ Root-Is-Purelib: false
4
+ Tag: cp37-abi3-macosx_11_0_arm64
.venv/lib/python3.13/site-packages/hf_xet/__init__.py ADDED
@@ -0,0 +1,5 @@
1
+ from .hf_xet import *
2
+
3
+ __doc__ = hf_xet.__doc__
4
+ if hasattr(hf_xet, "__all__"):
5
+ __all__ = hf_xet.__all__
.venv/lib/python3.13/site-packages/huggingface_hub/__init__.py ADDED
@@ -0,0 +1,1484 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # ***********
16
+ # `huggingface_hub` init has 2 modes:
17
+ # - Normal usage:
18
+ # If imported to use it, all modules and functions are lazy-loaded. This means
19
+ # they exist at top level in module but are imported only the first time they are
20
+ # used. This way, `from huggingface_hub import something` will import `something`
21
+ # quickly without the hassle of importing all the features from `huggingface_hub`.
22
+ # - Static check:
23
+ # If statically analyzed, all modules and functions are loaded normally. This way
24
+ # static typing check works properly as well as autocomplete in text editors and
25
+ # IDEs.
26
+ #
27
+ # The static model imports are done inside the `if TYPE_CHECKING:` statement at
28
+ # the bottom of this file. Since module/functions imports are duplicated, it is
29
+ # mandatory to make sure to add them twice when adding one. This is checked in the
30
+ # `make quality` command.
31
+ #
32
+ # To update the static imports, please run the following command and commit the changes.
33
+ # ```
34
+ # # Use script
35
+ # python utils/check_static_imports.py --update-file
36
+ #
37
+ # # Or run style on codebase
38
+ # make style
39
+ # ```
40
+ #
41
+ # ***********
42
+ # Lazy loader vendored from https://github.com/scientific-python/lazy_loader
43
+ import importlib
44
+ import os
45
+ import sys
46
+ from typing import TYPE_CHECKING
47
+
48
+
49
+ __version__ = "0.33.4"
50
+
51
+ # Alphabetical order of definitions is ensured in tests
52
+ # WARNING: any comment added in this dictionary definition will be lost when
53
+ # re-generating the file!
54
+ _SUBMOD_ATTRS = {
55
+ "_commit_scheduler": [
56
+ "CommitScheduler",
57
+ ],
58
+ "_inference_endpoints": [
59
+ "InferenceEndpoint",
60
+ "InferenceEndpointError",
61
+ "InferenceEndpointStatus",
62
+ "InferenceEndpointTimeoutError",
63
+ "InferenceEndpointType",
64
+ ],
65
+ "_login": [
66
+ "auth_list",
67
+ "auth_switch",
68
+ "interpreter_login",
69
+ "login",
70
+ "logout",
71
+ "notebook_login",
72
+ ],
73
+ "_oauth": [
74
+ "OAuthInfo",
75
+ "OAuthOrgInfo",
76
+ "OAuthUserInfo",
77
+ "attach_huggingface_oauth",
78
+ "parse_huggingface_oauth",
79
+ ],
80
+ "_snapshot_download": [
81
+ "snapshot_download",
82
+ ],
83
+ "_space_api": [
84
+ "SpaceHardware",
85
+ "SpaceRuntime",
86
+ "SpaceStage",
87
+ "SpaceStorage",
88
+ "SpaceVariable",
89
+ ],
90
+ "_tensorboard_logger": [
91
+ "HFSummaryWriter",
92
+ ],
93
+ "_webhooks_payload": [
94
+ "WebhookPayload",
95
+ "WebhookPayloadComment",
96
+ "WebhookPayloadDiscussion",
97
+ "WebhookPayloadDiscussionChanges",
98
+ "WebhookPayloadEvent",
99
+ "WebhookPayloadMovedTo",
100
+ "WebhookPayloadRepo",
101
+ "WebhookPayloadUrl",
102
+ "WebhookPayloadWebhook",
103
+ ],
104
+ "_webhooks_server": [
105
+ "WebhooksServer",
106
+ "webhook_endpoint",
107
+ ],
108
+ "community": [
109
+ "Discussion",
110
+ "DiscussionComment",
111
+ "DiscussionCommit",
112
+ "DiscussionEvent",
113
+ "DiscussionStatusChange",
114
+ "DiscussionTitleChange",
115
+ "DiscussionWithDetails",
116
+ ],
117
+ "constants": [
118
+ "CONFIG_NAME",
119
+ "FLAX_WEIGHTS_NAME",
120
+ "HUGGINGFACE_CO_URL_HOME",
121
+ "HUGGINGFACE_CO_URL_TEMPLATE",
122
+ "PYTORCH_WEIGHTS_NAME",
123
+ "REPO_TYPE_DATASET",
124
+ "REPO_TYPE_MODEL",
125
+ "REPO_TYPE_SPACE",
126
+ "TF2_WEIGHTS_NAME",
127
+ "TF_WEIGHTS_NAME",
128
+ ],
129
+ "fastai_utils": [
130
+ "_save_pretrained_fastai",
131
+ "from_pretrained_fastai",
132
+ "push_to_hub_fastai",
133
+ ],
134
+ "file_download": [
135
+ "HfFileMetadata",
136
+ "_CACHED_NO_EXIST",
137
+ "get_hf_file_metadata",
138
+ "hf_hub_download",
139
+ "hf_hub_url",
140
+ "try_to_load_from_cache",
141
+ ],
142
+ "hf_api": [
143
+ "Collection",
144
+ "CollectionItem",
145
+ "CommitInfo",
146
+ "CommitOperation",
147
+ "CommitOperationAdd",
148
+ "CommitOperationCopy",
149
+ "CommitOperationDelete",
150
+ "DatasetInfo",
151
+ "GitCommitInfo",
152
+ "GitRefInfo",
153
+ "GitRefs",
154
+ "HfApi",
155
+ "ModelInfo",
156
+ "RepoUrl",
157
+ "SpaceInfo",
158
+ "User",
159
+ "UserLikes",
160
+ "WebhookInfo",
161
+ "WebhookWatchedItem",
162
+ "accept_access_request",
163
+ "add_collection_item",
164
+ "add_space_secret",
165
+ "add_space_variable",
166
+ "auth_check",
167
+ "cancel_access_request",
168
+ "change_discussion_status",
169
+ "comment_discussion",
170
+ "create_branch",
171
+ "create_collection",
172
+ "create_commit",
173
+ "create_discussion",
174
+ "create_inference_endpoint",
175
+ "create_inference_endpoint_from_catalog",
176
+ "create_pull_request",
177
+ "create_repo",
178
+ "create_tag",
179
+ "create_webhook",
180
+ "dataset_info",
181
+ "delete_branch",
182
+ "delete_collection",
183
+ "delete_collection_item",
184
+ "delete_file",
185
+ "delete_folder",
186
+ "delete_inference_endpoint",
187
+ "delete_repo",
188
+ "delete_space_secret",
189
+ "delete_space_storage",
190
+ "delete_space_variable",
191
+ "delete_tag",
192
+ "delete_webhook",
193
+ "disable_webhook",
194
+ "duplicate_space",
195
+ "edit_discussion_comment",
196
+ "enable_webhook",
197
+ "file_exists",
198
+ "get_collection",
199
+ "get_dataset_tags",
200
+ "get_discussion_details",
201
+ "get_full_repo_name",
202
+ "get_inference_endpoint",
203
+ "get_model_tags",
204
+ "get_paths_info",
205
+ "get_repo_discussions",
206
+ "get_safetensors_metadata",
207
+ "get_space_runtime",
208
+ "get_space_variables",
209
+ "get_token_permission",
210
+ "get_user_overview",
211
+ "get_webhook",
212
+ "grant_access",
213
+ "list_accepted_access_requests",
214
+ "list_collections",
215
+ "list_datasets",
216
+ "list_inference_catalog",
217
+ "list_inference_endpoints",
218
+ "list_lfs_files",
219
+ "list_liked_repos",
220
+ "list_models",
221
+ "list_organization_members",
222
+ "list_papers",
223
+ "list_pending_access_requests",
224
+ "list_rejected_access_requests",
225
+ "list_repo_commits",
226
+ "list_repo_files",
227
+ "list_repo_likers",
228
+ "list_repo_refs",
229
+ "list_repo_tree",
230
+ "list_spaces",
231
+ "list_user_followers",
232
+ "list_user_following",
233
+ "list_webhooks",
234
+ "merge_pull_request",
235
+ "model_info",
236
+ "move_repo",
237
+ "paper_info",
238
+ "parse_safetensors_file_metadata",
239
+ "pause_inference_endpoint",
240
+ "pause_space",
241
+ "permanently_delete_lfs_files",
242
+ "preupload_lfs_files",
243
+ "reject_access_request",
244
+ "rename_discussion",
245
+ "repo_exists",
246
+ "repo_info",
247
+ "repo_type_and_id_from_hf_id",
248
+ "request_space_hardware",
249
+ "request_space_storage",
250
+ "restart_space",
251
+ "resume_inference_endpoint",
252
+ "revision_exists",
253
+ "run_as_future",
254
+ "scale_to_zero_inference_endpoint",
255
+ "set_space_sleep_time",
256
+ "space_info",
257
+ "super_squash_history",
258
+ "unlike",
259
+ "update_collection_item",
260
+ "update_collection_metadata",
261
+ "update_inference_endpoint",
262
+ "update_repo_settings",
263
+ "update_repo_visibility",
264
+ "update_webhook",
265
+ "upload_file",
266
+ "upload_folder",
267
+ "upload_large_folder",
268
+ "whoami",
269
+ ],
270
+ "hf_file_system": [
271
+ "HfFileSystem",
272
+ "HfFileSystemFile",
273
+ "HfFileSystemResolvedPath",
274
+ "HfFileSystemStreamFile",
275
+ ],
276
+ "hub_mixin": [
277
+ "ModelHubMixin",
278
+ "PyTorchModelHubMixin",
279
+ ],
280
+ "inference._client": [
281
+ "InferenceClient",
282
+ "InferenceTimeoutError",
283
+ ],
284
+ "inference._generated._async_client": [
285
+ "AsyncInferenceClient",
286
+ ],
287
+ "inference._generated.types": [
288
+ "AudioClassificationInput",
289
+ "AudioClassificationOutputElement",
290
+ "AudioClassificationOutputTransform",
291
+ "AudioClassificationParameters",
292
+ "AudioToAudioInput",
293
+ "AudioToAudioOutputElement",
294
+ "AutomaticSpeechRecognitionEarlyStoppingEnum",
295
+ "AutomaticSpeechRecognitionGenerationParameters",
296
+ "AutomaticSpeechRecognitionInput",
297
+ "AutomaticSpeechRecognitionOutput",
298
+ "AutomaticSpeechRecognitionOutputChunk",
299
+ "AutomaticSpeechRecognitionParameters",
300
+ "ChatCompletionInput",
301
+ "ChatCompletionInputFunctionDefinition",
302
+ "ChatCompletionInputFunctionName",
303
+ "ChatCompletionInputGrammarType",
304
+ "ChatCompletionInputJSONSchema",
305
+ "ChatCompletionInputMessage",
306
+ "ChatCompletionInputMessageChunk",
307
+ "ChatCompletionInputMessageChunkType",
308
+ "ChatCompletionInputResponseFormatJSONObject",
309
+ "ChatCompletionInputResponseFormatJSONSchema",
310
+ "ChatCompletionInputResponseFormatText",
311
+ "ChatCompletionInputStreamOptions",
312
+ "ChatCompletionInputTool",
313
+ "ChatCompletionInputToolCall",
314
+ "ChatCompletionInputToolChoiceClass",
315
+ "ChatCompletionInputToolChoiceEnum",
316
+ "ChatCompletionInputURL",
317
+ "ChatCompletionOutput",
318
+ "ChatCompletionOutputComplete",
319
+ "ChatCompletionOutputFunctionDefinition",
320
+ "ChatCompletionOutputLogprob",
321
+ "ChatCompletionOutputLogprobs",
322
+ "ChatCompletionOutputMessage",
323
+ "ChatCompletionOutputToolCall",
324
+ "ChatCompletionOutputTopLogprob",
325
+ "ChatCompletionOutputUsage",
326
+ "ChatCompletionStreamOutput",
327
+ "ChatCompletionStreamOutputChoice",
328
+ "ChatCompletionStreamOutputDelta",
329
+ "ChatCompletionStreamOutputDeltaToolCall",
330
+ "ChatCompletionStreamOutputFunction",
331
+ "ChatCompletionStreamOutputLogprob",
332
+ "ChatCompletionStreamOutputLogprobs",
333
+ "ChatCompletionStreamOutputTopLogprob",
334
+ "ChatCompletionStreamOutputUsage",
335
+ "DepthEstimationInput",
336
+ "DepthEstimationOutput",
337
+ "DocumentQuestionAnsweringInput",
338
+ "DocumentQuestionAnsweringInputData",
339
+ "DocumentQuestionAnsweringOutputElement",
340
+ "DocumentQuestionAnsweringParameters",
341
+ "FeatureExtractionInput",
342
+ "FeatureExtractionInputTruncationDirection",
343
+ "FillMaskInput",
344
+ "FillMaskOutputElement",
345
+ "FillMaskParameters",
346
+ "ImageClassificationInput",
347
+ "ImageClassificationOutputElement",
348
+ "ImageClassificationOutputTransform",
349
+ "ImageClassificationParameters",
350
+ "ImageSegmentationInput",
351
+ "ImageSegmentationOutputElement",
352
+ "ImageSegmentationParameters",
353
+ "ImageSegmentationSubtask",
354
+ "ImageToImageInput",
355
+ "ImageToImageOutput",
356
+ "ImageToImageParameters",
357
+ "ImageToImageTargetSize",
358
+ "ImageToTextEarlyStoppingEnum",
359
+ "ImageToTextGenerationParameters",
360
+ "ImageToTextInput",
361
+ "ImageToTextOutput",
362
+ "ImageToTextParameters",
363
+ "ObjectDetectionBoundingBox",
364
+ "ObjectDetectionInput",
365
+ "ObjectDetectionOutputElement",
366
+ "ObjectDetectionParameters",
367
+ "Padding",
368
+ "QuestionAnsweringInput",
369
+ "QuestionAnsweringInputData",
370
+ "QuestionAnsweringOutputElement",
371
+ "QuestionAnsweringParameters",
372
+ "SentenceSimilarityInput",
373
+ "SentenceSimilarityInputData",
374
+ "SummarizationInput",
375
+ "SummarizationOutput",
376
+ "SummarizationParameters",
377
+ "SummarizationTruncationStrategy",
378
+ "TableQuestionAnsweringInput",
379
+ "TableQuestionAnsweringInputData",
380
+ "TableQuestionAnsweringOutputElement",
381
+ "TableQuestionAnsweringParameters",
382
+ "Text2TextGenerationInput",
383
+ "Text2TextGenerationOutput",
384
+ "Text2TextGenerationParameters",
385
+ "Text2TextGenerationTruncationStrategy",
386
+ "TextClassificationInput",
387
+ "TextClassificationOutputElement",
388
+ "TextClassificationOutputTransform",
389
+ "TextClassificationParameters",
390
+ "TextGenerationInput",
391
+ "TextGenerationInputGenerateParameters",
392
+ "TextGenerationInputGrammarType",
393
+ "TextGenerationOutput",
394
+ "TextGenerationOutputBestOfSequence",
395
+ "TextGenerationOutputDetails",
396
+ "TextGenerationOutputFinishReason",
397
+ "TextGenerationOutputPrefillToken",
398
+ "TextGenerationOutputToken",
399
+ "TextGenerationStreamOutput",
400
+ "TextGenerationStreamOutputStreamDetails",
401
+ "TextGenerationStreamOutputToken",
402
+ "TextToAudioEarlyStoppingEnum",
403
+ "TextToAudioGenerationParameters",
404
+ "TextToAudioInput",
405
+ "TextToAudioOutput",
406
+ "TextToAudioParameters",
407
+ "TextToImageInput",
408
+ "TextToImageOutput",
409
+ "TextToImageParameters",
410
+ "TextToSpeechEarlyStoppingEnum",
411
+ "TextToSpeechGenerationParameters",
412
+ "TextToSpeechInput",
413
+ "TextToSpeechOutput",
414
+ "TextToSpeechParameters",
415
+ "TextToVideoInput",
416
+ "TextToVideoOutput",
417
+ "TextToVideoParameters",
418
+ "TokenClassificationAggregationStrategy",
419
+ "TokenClassificationInput",
420
+ "TokenClassificationOutputElement",
421
+ "TokenClassificationParameters",
422
+ "TranslationInput",
423
+ "TranslationOutput",
424
+ "TranslationParameters",
425
+ "TranslationTruncationStrategy",
426
+ "TypeEnum",
427
+ "VideoClassificationInput",
428
+ "VideoClassificationOutputElement",
429
+ "VideoClassificationOutputTransform",
430
+ "VideoClassificationParameters",
431
+ "VisualQuestionAnsweringInput",
432
+ "VisualQuestionAnsweringInputData",
433
+ "VisualQuestionAnsweringOutputElement",
434
+ "VisualQuestionAnsweringParameters",
435
+ "ZeroShotClassificationInput",
436
+ "ZeroShotClassificationOutputElement",
437
+ "ZeroShotClassificationParameters",
438
+ "ZeroShotImageClassificationInput",
439
+ "ZeroShotImageClassificationOutputElement",
440
+ "ZeroShotImageClassificationParameters",
441
+ "ZeroShotObjectDetectionBoundingBox",
442
+ "ZeroShotObjectDetectionInput",
443
+ "ZeroShotObjectDetectionOutputElement",
444
+ "ZeroShotObjectDetectionParameters",
445
+ ],
446
+ "inference._mcp.agent": [
447
+ "Agent",
448
+ ],
449
+ "inference._mcp.mcp_client": [
450
+ "MCPClient",
451
+ ],
452
+ "inference_api": [
453
+ "InferenceApi",
454
+ ],
455
+ "keras_mixin": [
456
+ "KerasModelHubMixin",
457
+ "from_pretrained_keras",
458
+ "push_to_hub_keras",
459
+ "save_pretrained_keras",
460
+ ],
461
+ "repocard": [
462
+ "DatasetCard",
463
+ "ModelCard",
464
+ "RepoCard",
465
+ "SpaceCard",
466
+ "metadata_eval_result",
467
+ "metadata_load",
468
+ "metadata_save",
469
+ "metadata_update",
470
+ ],
471
+ "repocard_data": [
472
+ "CardData",
473
+ "DatasetCardData",
474
+ "EvalResult",
475
+ "ModelCardData",
476
+ "SpaceCardData",
477
+ ],
478
+ "repository": [
479
+ "Repository",
480
+ ],
481
+ "serialization": [
482
+ "StateDictSplit",
483
+ "get_tf_storage_size",
484
+ "get_torch_storage_id",
485
+ "get_torch_storage_size",
486
+ "load_state_dict_from_file",
487
+ "load_torch_model",
488
+ "save_torch_model",
489
+ "save_torch_state_dict",
490
+ "split_state_dict_into_shards_factory",
491
+ "split_tf_state_dict_into_shards",
492
+ "split_torch_state_dict_into_shards",
493
+ ],
494
+ "serialization._dduf": [
495
+ "DDUFEntry",
496
+ "export_entries_as_dduf",
497
+ "export_folder_as_dduf",
498
+ "read_dduf_file",
499
+ ],
500
+ "utils": [
501
+ "CacheNotFound",
502
+ "CachedFileInfo",
503
+ "CachedRepoInfo",
504
+ "CachedRevisionInfo",
505
+ "CorruptedCacheException",
506
+ "DeleteCacheStrategy",
507
+ "HFCacheInfo",
508
+ "HfFolder",
509
+ "cached_assets_path",
510
+ "configure_http_backend",
511
+ "dump_environment_info",
512
+ "get_session",
513
+ "get_token",
514
+ "logging",
515
+ "scan_cache_dir",
516
+ ],
517
+ }
518
+
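+ # Illustrative sketch: the lazy-loading mode described at the top of this
+ # file is conventionally realized with a module-level __getattr__ over a
+ # mapping like _SUBMOD_ATTRS (simplified; not the vendored lazy_loader
+ # implementation itself):
+ #
+ #     _attr_to_module = {
+ #         attr: mod for mod, attrs in _SUBMOD_ATTRS.items() for attr in attrs
+ #     }
+ #
+ #     def __getattr__(name):
+ #         if name in _attr_to_module:
+ #             module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
+ #             return getattr(module, name)
+ #         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
+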
519
+ # WARNING: __all__ is generated automatically. Any manual edit will be lost when re-generating this file!
520
+ #
521
+ # To update the static imports, please run the following command and commit the changes.
522
+ # ```
523
+ # # Use script
524
+ # python utils/check_all_variable.py --update
525
+ #
526
+ # # Or run style on codebase
527
+ # make style
528
+ # ```
529
+
530
+ __all__ = [
531
+ "Agent",
532
+ "AsyncInferenceClient",
533
+ "AudioClassificationInput",
534
+ "AudioClassificationOutputElement",
535
+ "AudioClassificationOutputTransform",
536
+ "AudioClassificationParameters",
537
+ "AudioToAudioInput",
538
+ "AudioToAudioOutputElement",
539
+ "AutomaticSpeechRecognitionEarlyStoppingEnum",
540
+ "AutomaticSpeechRecognitionGenerationParameters",
541
+ "AutomaticSpeechRecognitionInput",
542
+ "AutomaticSpeechRecognitionOutput",
543
+ "AutomaticSpeechRecognitionOutputChunk",
544
+ "AutomaticSpeechRecognitionParameters",
545
+ "CONFIG_NAME",
546
+ "CacheNotFound",
547
+ "CachedFileInfo",
548
+ "CachedRepoInfo",
549
+ "CachedRevisionInfo",
550
+ "CardData",
551
+ "ChatCompletionInput",
552
+ "ChatCompletionInputFunctionDefinition",
553
+ "ChatCompletionInputFunctionName",
554
+ "ChatCompletionInputGrammarType",
555
+ "ChatCompletionInputJSONSchema",
556
+ "ChatCompletionInputMessage",
557
+ "ChatCompletionInputMessageChunk",
558
+ "ChatCompletionInputMessageChunkType",
559
+ "ChatCompletionInputResponseFormatJSONObject",
560
+ "ChatCompletionInputResponseFormatJSONSchema",
561
+ "ChatCompletionInputResponseFormatText",
562
+ "ChatCompletionInputStreamOptions",
563
+ "ChatCompletionInputTool",
564
+ "ChatCompletionInputToolCall",
565
+ "ChatCompletionInputToolChoiceClass",
566
+ "ChatCompletionInputToolChoiceEnum",
567
+ "ChatCompletionInputURL",
568
+ "ChatCompletionOutput",
569
+ "ChatCompletionOutputComplete",
570
+ "ChatCompletionOutputFunctionDefinition",
571
+ "ChatCompletionOutputLogprob",
572
+ "ChatCompletionOutputLogprobs",
573
+ "ChatCompletionOutputMessage",
574
+ "ChatCompletionOutputToolCall",
575
+ "ChatCompletionOutputTopLogprob",
576
+ "ChatCompletionOutputUsage",
577
+ "ChatCompletionStreamOutput",
578
+ "ChatCompletionStreamOutputChoice",
579
+ "ChatCompletionStreamOutputDelta",
580
+ "ChatCompletionStreamOutputDeltaToolCall",
581
+ "ChatCompletionStreamOutputFunction",
582
+ "ChatCompletionStreamOutputLogprob",
583
+ "ChatCompletionStreamOutputLogprobs",
584
+ "ChatCompletionStreamOutputTopLogprob",
585
+ "ChatCompletionStreamOutputUsage",
586
+ "Collection",
587
+ "CollectionItem",
588
+ "CommitInfo",
589
+ "CommitOperation",
590
+ "CommitOperationAdd",
591
+ "CommitOperationCopy",
592
+ "CommitOperationDelete",
593
+ "CommitScheduler",
594
+ "CorruptedCacheException",
595
+ "DDUFEntry",
596
+ "DatasetCard",
597
+ "DatasetCardData",
598
+ "DatasetInfo",
599
+ "DeleteCacheStrategy",
600
+ "DepthEstimationInput",
601
+ "DepthEstimationOutput",
602
+ "Discussion",
603
+ "DiscussionComment",
604
+ "DiscussionCommit",
605
+ "DiscussionEvent",
606
+ "DiscussionStatusChange",
607
+ "DiscussionTitleChange",
608
+ "DiscussionWithDetails",
609
+ "DocumentQuestionAnsweringInput",
610
+ "DocumentQuestionAnsweringInputData",
611
+ "DocumentQuestionAnsweringOutputElement",
612
+ "DocumentQuestionAnsweringParameters",
613
+ "EvalResult",
614
+ "FLAX_WEIGHTS_NAME",
615
+ "FeatureExtractionInput",
616
+ "FeatureExtractionInputTruncationDirection",
617
+ "FillMaskInput",
618
+ "FillMaskOutputElement",
619
+ "FillMaskParameters",
620
+ "GitCommitInfo",
621
+ "GitRefInfo",
622
+ "GitRefs",
623
+ "HFCacheInfo",
624
+ "HFSummaryWriter",
625
+ "HUGGINGFACE_CO_URL_HOME",
626
+ "HUGGINGFACE_CO_URL_TEMPLATE",
627
+ "HfApi",
628
+ "HfFileMetadata",
629
+ "HfFileSystem",
630
+ "HfFileSystemFile",
631
+ "HfFileSystemResolvedPath",
632
+ "HfFileSystemStreamFile",
633
+ "HfFolder",
634
+ "ImageClassificationInput",
635
+ "ImageClassificationOutputElement",
636
+ "ImageClassificationOutputTransform",
637
+ "ImageClassificationParameters",
638
+ "ImageSegmentationInput",
639
+ "ImageSegmentationOutputElement",
640
+ "ImageSegmentationParameters",
641
+ "ImageSegmentationSubtask",
642
+ "ImageToImageInput",
643
+ "ImageToImageOutput",
644
+ "ImageToImageParameters",
645
+ "ImageToImageTargetSize",
646
+ "ImageToTextEarlyStoppingEnum",
647
+ "ImageToTextGenerationParameters",
648
+ "ImageToTextInput",
649
+ "ImageToTextOutput",
650
+ "ImageToTextParameters",
651
+ "InferenceApi",
652
+ "InferenceClient",
653
+ "InferenceEndpoint",
654
+ "InferenceEndpointError",
655
+ "InferenceEndpointStatus",
656
+ "InferenceEndpointTimeoutError",
657
+ "InferenceEndpointType",
658
+ "InferenceTimeoutError",
659
+ "KerasModelHubMixin",
660
+ "MCPClient",
661
+ "ModelCard",
662
+ "ModelCardData",
663
+ "ModelHubMixin",
664
+ "ModelInfo",
665
+ "OAuthInfo",
666
+ "OAuthOrgInfo",
667
+ "OAuthUserInfo",
668
+ "ObjectDetectionBoundingBox",
669
+ "ObjectDetectionInput",
670
+ "ObjectDetectionOutputElement",
671
+ "ObjectDetectionParameters",
672
+ "PYTORCH_WEIGHTS_NAME",
673
+ "Padding",
674
+ "PyTorchModelHubMixin",
675
+ "QuestionAnsweringInput",
676
+ "QuestionAnsweringInputData",
677
+ "QuestionAnsweringOutputElement",
678
+ "QuestionAnsweringParameters",
679
+ "REPO_TYPE_DATASET",
680
+ "REPO_TYPE_MODEL",
681
+ "REPO_TYPE_SPACE",
682
+ "RepoCard",
683
+ "RepoUrl",
684
+ "Repository",
685
+ "SentenceSimilarityInput",
686
+ "SentenceSimilarityInputData",
687
+ "SpaceCard",
688
+ "SpaceCardData",
689
+ "SpaceHardware",
690
+ "SpaceInfo",
691
+ "SpaceRuntime",
692
+ "SpaceStage",
693
+ "SpaceStorage",
694
+ "SpaceVariable",
695
+ "StateDictSplit",
696
+ "SummarizationInput",
697
+ "SummarizationOutput",
698
+ "SummarizationParameters",
699
+ "SummarizationTruncationStrategy",
700
+ "TF2_WEIGHTS_NAME",
701
+ "TF_WEIGHTS_NAME",
702
+ "TableQuestionAnsweringInput",
703
+ "TableQuestionAnsweringInputData",
704
+ "TableQuestionAnsweringOutputElement",
705
+ "TableQuestionAnsweringParameters",
706
+ "Text2TextGenerationInput",
707
+ "Text2TextGenerationOutput",
708
+ "Text2TextGenerationParameters",
709
+ "Text2TextGenerationTruncationStrategy",
710
+ "TextClassificationInput",
711
+ "TextClassificationOutputElement",
712
+ "TextClassificationOutputTransform",
713
+ "TextClassificationParameters",
714
+ "TextGenerationInput",
715
+ "TextGenerationInputGenerateParameters",
716
+ "TextGenerationInputGrammarType",
717
+ "TextGenerationOutput",
718
+ "TextGenerationOutputBestOfSequence",
719
+ "TextGenerationOutputDetails",
720
+ "TextGenerationOutputFinishReason",
721
+ "TextGenerationOutputPrefillToken",
722
+ "TextGenerationOutputToken",
723
+ "TextGenerationStreamOutput",
724
+ "TextGenerationStreamOutputStreamDetails",
725
+ "TextGenerationStreamOutputToken",
726
+ "TextToAudioEarlyStoppingEnum",
727
+ "TextToAudioGenerationParameters",
728
+ "TextToAudioInput",
729
+ "TextToAudioOutput",
730
+ "TextToAudioParameters",
731
+ "TextToImageInput",
732
+ "TextToImageOutput",
733
+ "TextToImageParameters",
734
+ "TextToSpeechEarlyStoppingEnum",
735
+ "TextToSpeechGenerationParameters",
736
+ "TextToSpeechInput",
737
+ "TextToSpeechOutput",
738
+ "TextToSpeechParameters",
739
+ "TextToVideoInput",
740
+ "TextToVideoOutput",
741
+ "TextToVideoParameters",
742
+ "TokenClassificationAggregationStrategy",
743
+ "TokenClassificationInput",
744
+ "TokenClassificationOutputElement",
745
+ "TokenClassificationParameters",
746
+ "TranslationInput",
747
+ "TranslationOutput",
748
+ "TranslationParameters",
749
+ "TranslationTruncationStrategy",
750
+ "TypeEnum",
751
+ "User",
752
+ "UserLikes",
753
+ "VideoClassificationInput",
754
+ "VideoClassificationOutputElement",
755
+ "VideoClassificationOutputTransform",
756
+ "VideoClassificationParameters",
757
+ "VisualQuestionAnsweringInput",
758
+ "VisualQuestionAnsweringInputData",
759
+ "VisualQuestionAnsweringOutputElement",
760
+ "VisualQuestionAnsweringParameters",
761
+ "WebhookInfo",
762
+ "WebhookPayload",
763
+ "WebhookPayloadComment",
764
+ "WebhookPayloadDiscussion",
765
+ "WebhookPayloadDiscussionChanges",
766
+ "WebhookPayloadEvent",
767
+ "WebhookPayloadMovedTo",
768
+ "WebhookPayloadRepo",
769
+ "WebhookPayloadUrl",
770
+ "WebhookPayloadWebhook",
771
+ "WebhookWatchedItem",
772
+ "WebhooksServer",
773
+ "ZeroShotClassificationInput",
774
+ "ZeroShotClassificationOutputElement",
775
+ "ZeroShotClassificationParameters",
776
+ "ZeroShotImageClassificationInput",
777
+ "ZeroShotImageClassificationOutputElement",
778
+ "ZeroShotImageClassificationParameters",
779
+ "ZeroShotObjectDetectionBoundingBox",
780
+ "ZeroShotObjectDetectionInput",
781
+ "ZeroShotObjectDetectionOutputElement",
782
+ "ZeroShotObjectDetectionParameters",
783
+ "_CACHED_NO_EXIST",
784
+ "_save_pretrained_fastai",
785
+ "accept_access_request",
786
+ "add_collection_item",
787
+ "add_space_secret",
788
+ "add_space_variable",
789
+ "attach_huggingface_oauth",
790
+ "auth_check",
791
+ "auth_list",
792
+ "auth_switch",
793
+ "cached_assets_path",
794
+ "cancel_access_request",
795
+ "change_discussion_status",
796
+ "comment_discussion",
797
+ "configure_http_backend",
798
+ "create_branch",
799
+ "create_collection",
800
+ "create_commit",
801
+ "create_discussion",
802
+ "create_inference_endpoint",
803
+ "create_inference_endpoint_from_catalog",
804
+ "create_pull_request",
805
+ "create_repo",
806
+ "create_tag",
807
+ "create_webhook",
808
+ "dataset_info",
809
+ "delete_branch",
810
+ "delete_collection",
811
+ "delete_collection_item",
812
+ "delete_file",
813
+ "delete_folder",
814
+ "delete_inference_endpoint",
815
+ "delete_repo",
816
+ "delete_space_secret",
817
+ "delete_space_storage",
818
+ "delete_space_variable",
819
+ "delete_tag",
820
+ "delete_webhook",
821
+ "disable_webhook",
822
+ "dump_environment_info",
823
+ "duplicate_space",
824
+ "edit_discussion_comment",
825
+ "enable_webhook",
826
+ "export_entries_as_dduf",
827
+ "export_folder_as_dduf",
828
+ "file_exists",
829
+ "from_pretrained_fastai",
830
+ "from_pretrained_keras",
831
+ "get_collection",
832
+ "get_dataset_tags",
833
+ "get_discussion_details",
834
+ "get_full_repo_name",
835
+ "get_hf_file_metadata",
836
+ "get_inference_endpoint",
837
+ "get_model_tags",
838
+ "get_paths_info",
839
+ "get_repo_discussions",
840
+ "get_safetensors_metadata",
841
+ "get_session",
842
+ "get_space_runtime",
843
+ "get_space_variables",
844
+ "get_tf_storage_size",
845
+ "get_token",
846
+ "get_token_permission",
847
+ "get_torch_storage_id",
848
+ "get_torch_storage_size",
849
+ "get_user_overview",
850
+ "get_webhook",
851
+ "grant_access",
852
+ "hf_hub_download",
853
+ "hf_hub_url",
854
+ "interpreter_login",
855
+ "list_accepted_access_requests",
856
+ "list_collections",
857
+ "list_datasets",
858
+ "list_inference_catalog",
859
+ "list_inference_endpoints",
860
+ "list_lfs_files",
861
+ "list_liked_repos",
862
+ "list_models",
863
+ "list_organization_members",
864
+ "list_papers",
865
+ "list_pending_access_requests",
866
+ "list_rejected_access_requests",
867
+ "list_repo_commits",
868
+ "list_repo_files",
869
+ "list_repo_likers",
870
+ "list_repo_refs",
871
+ "list_repo_tree",
872
+ "list_spaces",
873
+ "list_user_followers",
874
+ "list_user_following",
875
+ "list_webhooks",
876
+ "load_state_dict_from_file",
877
+ "load_torch_model",
878
+ "logging",
879
+ "login",
880
+ "logout",
881
+ "merge_pull_request",
882
+ "metadata_eval_result",
883
+ "metadata_load",
884
+ "metadata_save",
885
+ "metadata_update",
886
+ "model_info",
887
+ "move_repo",
888
+ "notebook_login",
889
+ "paper_info",
890
+ "parse_huggingface_oauth",
891
+ "parse_safetensors_file_metadata",
892
+ "pause_inference_endpoint",
893
+ "pause_space",
894
+ "permanently_delete_lfs_files",
895
+ "preupload_lfs_files",
896
+ "push_to_hub_fastai",
897
+ "push_to_hub_keras",
898
+ "read_dduf_file",
899
+ "reject_access_request",
900
+ "rename_discussion",
901
+ "repo_exists",
902
+ "repo_info",
903
+ "repo_type_and_id_from_hf_id",
904
+ "request_space_hardware",
905
+ "request_space_storage",
906
+ "restart_space",
907
+ "resume_inference_endpoint",
908
+ "revision_exists",
909
+ "run_as_future",
910
+ "save_pretrained_keras",
911
+ "save_torch_model",
912
+ "save_torch_state_dict",
913
+ "scale_to_zero_inference_endpoint",
914
+ "scan_cache_dir",
915
+ "set_space_sleep_time",
916
+ "snapshot_download",
917
+ "space_info",
918
+ "split_state_dict_into_shards_factory",
919
+ "split_tf_state_dict_into_shards",
920
+ "split_torch_state_dict_into_shards",
921
+ "super_squash_history",
922
+ "try_to_load_from_cache",
923
+ "unlike",
924
+ "update_collection_item",
925
+ "update_collection_metadata",
926
+ "update_inference_endpoint",
927
+ "update_repo_settings",
928
+ "update_repo_visibility",
929
+ "update_webhook",
930
+ "upload_file",
931
+ "upload_folder",
932
+ "upload_large_folder",
933
+ "webhook_endpoint",
934
+ "whoami",
935
+ ]
936
+
937
+
938
+ def _attach(package_name, submodules=None, submod_attrs=None):
+     """Attach lazily loaded submodules, functions, or other attributes.
+ 
+     Typically, modules import submodules and attributes as follows:
+ 
+     ```py
+     import mysubmodule
+     import anothersubmodule
+ 
+     from .foo import someattr
+     ```
+ 
+     The idea is to replace a package's `__getattr__` and `__dir__`, such that all imports
+     work exactly the way they would with normal imports, except that the import occurs
+     upon first use.
+ 
+     The typical way to call this function, replacing the above imports, is:
+ 
+     ```python
+     __getattr__, __dir__ = _attach(
+         __name__,
+         ['mysubmodule', 'anothersubmodule'],
+         {'foo': ['someattr']}
+     )
+     ```
+     This functionality requires Python 3.7 or higher.
+ 
+     Args:
+         package_name (`str`):
+             Typically use `__name__`.
+         submodules (`set`):
+             Set of submodules to attach.
+         submod_attrs (`dict`):
+             Dictionary of submodule -> list of attributes / functions.
+             These attributes are imported as they are used.
+ 
+     Returns:
+         __getattr__, __dir__
+ 
+     """
+     if submod_attrs is None:
+         submod_attrs = {}
+ 
+     if submodules is None:
+         submodules = set()
+     else:
+         submodules = set(submodules)
+ 
+     attr_to_modules = {attr: mod for mod, attrs in submod_attrs.items() for attr in attrs}
+ 
+     def __getattr__(name):
+         if name in submodules:
+             try:
+                 return importlib.import_module(f"{package_name}.{name}")
+             except Exception as e:
+                 print(f"Error importing {package_name}.{name}: {e}")
+                 raise
+         elif name in attr_to_modules:
+             submod_path = f"{package_name}.{attr_to_modules[name]}"
+             try:
+                 submod = importlib.import_module(submod_path)
+             except Exception as e:
+                 print(f"Error importing {submod_path}: {e}")
+                 raise
+             attr = getattr(submod, name)
+ 
+             # If the attribute lives in a file (module) with the same
+             # name as the attribute, ensure that the attribute and *not*
+             # the module is accessible on the package.
+             if name == attr_to_modules[name]:
+                 pkg = sys.modules[package_name]
+                 pkg.__dict__[name] = attr
+ 
+             return attr
+         else:
+             raise AttributeError(f"No {package_name} attribute {name}")
+ 
+     def __dir__():
+         return __all__
+ 
+     return __getattr__, __dir__
+ 
+ 
+ __getattr__, __dir__ = _attach(__name__, submodules=[], submod_attrs=_SUBMOD_ATTRS)
+ 
+ if os.environ.get("EAGER_IMPORT", ""):
+     for attr in __all__:
+         __getattr__(attr)
+ 
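For orientation, the call above wires the standard PEP 562 lazy-import pattern into the package: no submodule is imported until one of its attributes is first accessed. A small demo of the observable behavior (whether the submodule is already loaded before the access depends on your environment):

```python
import sys

import huggingface_hub

# Normally the submodule is not loaded yet at this point.
loaded_before = "huggingface_hub.file_download" in sys.modules

# Accessing the attribute fires the module-level __getattr__ defined above,
# which imports .file_download on the fly and returns the function.
fn = huggingface_hub.hf_hub_download
loaded_after = "huggingface_hub.file_download" in sys.modules  # True
```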
+ # WARNING: any content below this statement is generated automatically. Any manual edit
+ # will be lost when re-generating this file!
+ #
+ # To update the static imports, please run the following command and commit the changes.
+ # ```
+ # # Use script
+ # python utils/check_static_imports.py --update
+ #
+ # # Or run style on codebase
+ # make style
+ # ```
+ if TYPE_CHECKING:  # pragma: no cover
+     from ._commit_scheduler import CommitScheduler  # noqa: F401
+     from ._inference_endpoints import (
+         InferenceEndpoint,  # noqa: F401
+         InferenceEndpointError,  # noqa: F401
+         InferenceEndpointStatus,  # noqa: F401
+         InferenceEndpointTimeoutError,  # noqa: F401
+         InferenceEndpointType,  # noqa: F401
+     )
+     from ._login import (
+         auth_list,  # noqa: F401
+         auth_switch,  # noqa: F401
+         interpreter_login,  # noqa: F401
+         login,  # noqa: F401
+         logout,  # noqa: F401
+         notebook_login,  # noqa: F401
+     )
+     from ._oauth import (
+         OAuthInfo,  # noqa: F401
+         OAuthOrgInfo,  # noqa: F401
+         OAuthUserInfo,  # noqa: F401
+         attach_huggingface_oauth,  # noqa: F401
+         parse_huggingface_oauth,  # noqa: F401
+     )
+     from ._snapshot_download import snapshot_download  # noqa: F401
+     from ._space_api import (
+         SpaceHardware,  # noqa: F401
+         SpaceRuntime,  # noqa: F401
+         SpaceStage,  # noqa: F401
+         SpaceStorage,  # noqa: F401
+         SpaceVariable,  # noqa: F401
+     )
+     from ._tensorboard_logger import HFSummaryWriter  # noqa: F401
+     from ._webhooks_payload import (
+         WebhookPayload,  # noqa: F401
+         WebhookPayloadComment,  # noqa: F401
+         WebhookPayloadDiscussion,  # noqa: F401
+         WebhookPayloadDiscussionChanges,  # noqa: F401
+         WebhookPayloadEvent,  # noqa: F401
+         WebhookPayloadMovedTo,  # noqa: F401
+         WebhookPayloadRepo,  # noqa: F401
+         WebhookPayloadUrl,  # noqa: F401
+         WebhookPayloadWebhook,  # noqa: F401
+     )
+     from ._webhooks_server import (
+         WebhooksServer,  # noqa: F401
+         webhook_endpoint,  # noqa: F401
+     )
+     from .community import (
+         Discussion,  # noqa: F401
+         DiscussionComment,  # noqa: F401
+         DiscussionCommit,  # noqa: F401
+         DiscussionEvent,  # noqa: F401
+         DiscussionStatusChange,  # noqa: F401
+         DiscussionTitleChange,  # noqa: F401
+         DiscussionWithDetails,  # noqa: F401
+     )
+     from .constants import (
+         CONFIG_NAME,  # noqa: F401
+         FLAX_WEIGHTS_NAME,  # noqa: F401
+         HUGGINGFACE_CO_URL_HOME,  # noqa: F401
+         HUGGINGFACE_CO_URL_TEMPLATE,  # noqa: F401
+         PYTORCH_WEIGHTS_NAME,  # noqa: F401
+         REPO_TYPE_DATASET,  # noqa: F401
+         REPO_TYPE_MODEL,  # noqa: F401
+         REPO_TYPE_SPACE,  # noqa: F401
+         TF2_WEIGHTS_NAME,  # noqa: F401
+         TF_WEIGHTS_NAME,  # noqa: F401
+     )
+     from .fastai_utils import (
+         _save_pretrained_fastai,  # noqa: F401
+         from_pretrained_fastai,  # noqa: F401
+         push_to_hub_fastai,  # noqa: F401
+     )
+     from .file_download import (
+         _CACHED_NO_EXIST,  # noqa: F401
+         HfFileMetadata,  # noqa: F401
+         get_hf_file_metadata,  # noqa: F401
+         hf_hub_download,  # noqa: F401
+         hf_hub_url,  # noqa: F401
+         try_to_load_from_cache,  # noqa: F401
+     )
+     from .hf_api import (
+         Collection,  # noqa: F401
+         CollectionItem,  # noqa: F401
+         CommitInfo,  # noqa: F401
+         CommitOperation,  # noqa: F401
+         CommitOperationAdd,  # noqa: F401
+         CommitOperationCopy,  # noqa: F401
+         CommitOperationDelete,  # noqa: F401
+         DatasetInfo,  # noqa: F401
+         GitCommitInfo,  # noqa: F401
+         GitRefInfo,  # noqa: F401
+         GitRefs,  # noqa: F401
+         HfApi,  # noqa: F401
+         ModelInfo,  # noqa: F401
+         RepoUrl,  # noqa: F401
+         SpaceInfo,  # noqa: F401
+         User,  # noqa: F401
+         UserLikes,  # noqa: F401
+         WebhookInfo,  # noqa: F401
+         WebhookWatchedItem,  # noqa: F401
+         accept_access_request,  # noqa: F401
+         add_collection_item,  # noqa: F401
+         add_space_secret,  # noqa: F401
+         add_space_variable,  # noqa: F401
+         auth_check,  # noqa: F401
+         cancel_access_request,  # noqa: F401
+         change_discussion_status,  # noqa: F401
+         comment_discussion,  # noqa: F401
+         create_branch,  # noqa: F401
+         create_collection,  # noqa: F401
+         create_commit,  # noqa: F401
+         create_discussion,  # noqa: F401
+         create_inference_endpoint,  # noqa: F401
+         create_inference_endpoint_from_catalog,  # noqa: F401
+         create_pull_request,  # noqa: F401
+         create_repo,  # noqa: F401
+         create_tag,  # noqa: F401
+         create_webhook,  # noqa: F401
+         dataset_info,  # noqa: F401
+         delete_branch,  # noqa: F401
+         delete_collection,  # noqa: F401
+         delete_collection_item,  # noqa: F401
+         delete_file,  # noqa: F401
+         delete_folder,  # noqa: F401
+         delete_inference_endpoint,  # noqa: F401
+         delete_repo,  # noqa: F401
+         delete_space_secret,  # noqa: F401
+         delete_space_storage,  # noqa: F401
+         delete_space_variable,  # noqa: F401
+         delete_tag,  # noqa: F401
+         delete_webhook,  # noqa: F401
+         disable_webhook,  # noqa: F401
+         duplicate_space,  # noqa: F401
+         edit_discussion_comment,  # noqa: F401
+         enable_webhook,  # noqa: F401
+         file_exists,  # noqa: F401
+         get_collection,  # noqa: F401
+         get_dataset_tags,  # noqa: F401
+         get_discussion_details,  # noqa: F401
+         get_full_repo_name,  # noqa: F401
+         get_inference_endpoint,  # noqa: F401
+         get_model_tags,  # noqa: F401
+         get_paths_info,  # noqa: F401
+         get_repo_discussions,  # noqa: F401
+         get_safetensors_metadata,  # noqa: F401
+         get_space_runtime,  # noqa: F401
+         get_space_variables,  # noqa: F401
+         get_token_permission,  # noqa: F401
+         get_user_overview,  # noqa: F401
+         get_webhook,  # noqa: F401
+         grant_access,  # noqa: F401
+         list_accepted_access_requests,  # noqa: F401
+         list_collections,  # noqa: F401
+         list_datasets,  # noqa: F401
+         list_inference_catalog,  # noqa: F401
+         list_inference_endpoints,  # noqa: F401
+         list_lfs_files,  # noqa: F401
+         list_liked_repos,  # noqa: F401
+         list_models,  # noqa: F401
+         list_organization_members,  # noqa: F401
+         list_papers,  # noqa: F401
+         list_pending_access_requests,  # noqa: F401
+         list_rejected_access_requests,  # noqa: F401
+         list_repo_commits,  # noqa: F401
+         list_repo_files,  # noqa: F401
+         list_repo_likers,  # noqa: F401
+         list_repo_refs,  # noqa: F401
+         list_repo_tree,  # noqa: F401
+         list_spaces,  # noqa: F401
+         list_user_followers,  # noqa: F401
+         list_user_following,  # noqa: F401
+         list_webhooks,  # noqa: F401
+         merge_pull_request,  # noqa: F401
+         model_info,  # noqa: F401
+         move_repo,  # noqa: F401
+         paper_info,  # noqa: F401
+         parse_safetensors_file_metadata,  # noqa: F401
+         pause_inference_endpoint,  # noqa: F401
+         pause_space,  # noqa: F401
+         permanently_delete_lfs_files,  # noqa: F401
+         preupload_lfs_files,  # noqa: F401
+         reject_access_request,  # noqa: F401
+         rename_discussion,  # noqa: F401
+         repo_exists,  # noqa: F401
+         repo_info,  # noqa: F401
+         repo_type_and_id_from_hf_id,  # noqa: F401
+         request_space_hardware,  # noqa: F401
+         request_space_storage,  # noqa: F401
+         restart_space,  # noqa: F401
+         resume_inference_endpoint,  # noqa: F401
+         revision_exists,  # noqa: F401
+         run_as_future,  # noqa: F401
+         scale_to_zero_inference_endpoint,  # noqa: F401
+         set_space_sleep_time,  # noqa: F401
+         space_info,  # noqa: F401
+         super_squash_history,  # noqa: F401
+         unlike,  # noqa: F401
+         update_collection_item,  # noqa: F401
+         update_collection_metadata,  # noqa: F401
+         update_inference_endpoint,  # noqa: F401
+         update_repo_settings,  # noqa: F401
+         update_repo_visibility,  # noqa: F401
+         update_webhook,  # noqa: F401
+         upload_file,  # noqa: F401
+         upload_folder,  # noqa: F401
+         upload_large_folder,  # noqa: F401
+         whoami,  # noqa: F401
+     )
+     from .hf_file_system import (
+         HfFileSystem,  # noqa: F401
+         HfFileSystemFile,  # noqa: F401
+         HfFileSystemResolvedPath,  # noqa: F401
+         HfFileSystemStreamFile,  # noqa: F401
+     )
+     from .hub_mixin import (
+         ModelHubMixin,  # noqa: F401
+         PyTorchModelHubMixin,  # noqa: F401
+     )
+     from .inference._client import (
+         InferenceClient,  # noqa: F401
+         InferenceTimeoutError,  # noqa: F401
+     )
+     from .inference._generated._async_client import AsyncInferenceClient  # noqa: F401
+     from .inference._generated.types import (
+         AudioClassificationInput,  # noqa: F401
+         AudioClassificationOutputElement,  # noqa: F401
+         AudioClassificationOutputTransform,  # noqa: F401
+         AudioClassificationParameters,  # noqa: F401
+         AudioToAudioInput,  # noqa: F401
+         AudioToAudioOutputElement,  # noqa: F401
+         AutomaticSpeechRecognitionEarlyStoppingEnum,  # noqa: F401
+         AutomaticSpeechRecognitionGenerationParameters,  # noqa: F401
+         AutomaticSpeechRecognitionInput,  # noqa: F401
+         AutomaticSpeechRecognitionOutput,  # noqa: F401
+         AutomaticSpeechRecognitionOutputChunk,  # noqa: F401
+         AutomaticSpeechRecognitionParameters,  # noqa: F401
+         ChatCompletionInput,  # noqa: F401
+         ChatCompletionInputFunctionDefinition,  # noqa: F401
+         ChatCompletionInputFunctionName,  # noqa: F401
+         ChatCompletionInputGrammarType,  # noqa: F401
+         ChatCompletionInputJSONSchema,  # noqa: F401
+         ChatCompletionInputMessage,  # noqa: F401
+         ChatCompletionInputMessageChunk,  # noqa: F401
+         ChatCompletionInputMessageChunkType,  # noqa: F401
+         ChatCompletionInputResponseFormatJSONObject,  # noqa: F401
+         ChatCompletionInputResponseFormatJSONSchema,  # noqa: F401
+         ChatCompletionInputResponseFormatText,  # noqa: F401
+         ChatCompletionInputStreamOptions,  # noqa: F401
+         ChatCompletionInputTool,  # noqa: F401
+         ChatCompletionInputToolCall,  # noqa: F401
+         ChatCompletionInputToolChoiceClass,  # noqa: F401
+         ChatCompletionInputToolChoiceEnum,  # noqa: F401
+         ChatCompletionInputURL,  # noqa: F401
+         ChatCompletionOutput,  # noqa: F401
+         ChatCompletionOutputComplete,  # noqa: F401
+         ChatCompletionOutputFunctionDefinition,  # noqa: F401
+         ChatCompletionOutputLogprob,  # noqa: F401
+         ChatCompletionOutputLogprobs,  # noqa: F401
+         ChatCompletionOutputMessage,  # noqa: F401
+         ChatCompletionOutputToolCall,  # noqa: F401
+         ChatCompletionOutputTopLogprob,  # noqa: F401
+         ChatCompletionOutputUsage,  # noqa: F401
+         ChatCompletionStreamOutput,  # noqa: F401
+         ChatCompletionStreamOutputChoice,  # noqa: F401
+         ChatCompletionStreamOutputDelta,  # noqa: F401
+         ChatCompletionStreamOutputDeltaToolCall,  # noqa: F401
+         ChatCompletionStreamOutputFunction,  # noqa: F401
+         ChatCompletionStreamOutputLogprob,  # noqa: F401
+         ChatCompletionStreamOutputLogprobs,  # noqa: F401
+         ChatCompletionStreamOutputTopLogprob,  # noqa: F401
+         ChatCompletionStreamOutputUsage,  # noqa: F401
+         DepthEstimationInput,  # noqa: F401
+         DepthEstimationOutput,  # noqa: F401
+         DocumentQuestionAnsweringInput,  # noqa: F401
+         DocumentQuestionAnsweringInputData,  # noqa: F401
+         DocumentQuestionAnsweringOutputElement,  # noqa: F401
+         DocumentQuestionAnsweringParameters,  # noqa: F401
+         FeatureExtractionInput,  # noqa: F401
+         FeatureExtractionInputTruncationDirection,  # noqa: F401
+         FillMaskInput,  # noqa: F401
+         FillMaskOutputElement,  # noqa: F401
+         FillMaskParameters,  # noqa: F401
+         ImageClassificationInput,  # noqa: F401
+         ImageClassificationOutputElement,  # noqa: F401
+         ImageClassificationOutputTransform,  # noqa: F401
+         ImageClassificationParameters,  # noqa: F401
+         ImageSegmentationInput,  # noqa: F401
+         ImageSegmentationOutputElement,  # noqa: F401
+         ImageSegmentationParameters,  # noqa: F401
+         ImageSegmentationSubtask,  # noqa: F401
+         ImageToImageInput,  # noqa: F401
+         ImageToImageOutput,  # noqa: F401
+         ImageToImageParameters,  # noqa: F401
+         ImageToImageTargetSize,  # noqa: F401
+         ImageToTextEarlyStoppingEnum,  # noqa: F401
+         ImageToTextGenerationParameters,  # noqa: F401
+         ImageToTextInput,  # noqa: F401
+         ImageToTextOutput,  # noqa: F401
+         ImageToTextParameters,  # noqa: F401
+         ObjectDetectionBoundingBox,  # noqa: F401
+         ObjectDetectionInput,  # noqa: F401
+         ObjectDetectionOutputElement,  # noqa: F401
+         ObjectDetectionParameters,  # noqa: F401
+         Padding,  # noqa: F401
+         QuestionAnsweringInput,  # noqa: F401
+         QuestionAnsweringInputData,  # noqa: F401
+         QuestionAnsweringOutputElement,  # noqa: F401
+         QuestionAnsweringParameters,  # noqa: F401
+         SentenceSimilarityInput,  # noqa: F401
+         SentenceSimilarityInputData,  # noqa: F401
+         SummarizationInput,  # noqa: F401
+         SummarizationOutput,  # noqa: F401
+         SummarizationParameters,  # noqa: F401
+         SummarizationTruncationStrategy,  # noqa: F401
+         TableQuestionAnsweringInput,  # noqa: F401
+         TableQuestionAnsweringInputData,  # noqa: F401
+         TableQuestionAnsweringOutputElement,  # noqa: F401
+         TableQuestionAnsweringParameters,  # noqa: F401
+         Text2TextGenerationInput,  # noqa: F401
+         Text2TextGenerationOutput,  # noqa: F401
+         Text2TextGenerationParameters,  # noqa: F401
+         Text2TextGenerationTruncationStrategy,  # noqa: F401
+         TextClassificationInput,  # noqa: F401
+         TextClassificationOutputElement,  # noqa: F401
+         TextClassificationOutputTransform,  # noqa: F401
+         TextClassificationParameters,  # noqa: F401
+         TextGenerationInput,  # noqa: F401
+         TextGenerationInputGenerateParameters,  # noqa: F401
+         TextGenerationInputGrammarType,  # noqa: F401
+         TextGenerationOutput,  # noqa: F401
+         TextGenerationOutputBestOfSequence,  # noqa: F401
+         TextGenerationOutputDetails,  # noqa: F401
+         TextGenerationOutputFinishReason,  # noqa: F401
+         TextGenerationOutputPrefillToken,  # noqa: F401
+         TextGenerationOutputToken,  # noqa: F401
+         TextGenerationStreamOutput,  # noqa: F401
+         TextGenerationStreamOutputStreamDetails,  # noqa: F401
+         TextGenerationStreamOutputToken,  # noqa: F401
+         TextToAudioEarlyStoppingEnum,  # noqa: F401
+         TextToAudioGenerationParameters,  # noqa: F401
+         TextToAudioInput,  # noqa: F401
+         TextToAudioOutput,  # noqa: F401
+         TextToAudioParameters,  # noqa: F401
+         TextToImageInput,  # noqa: F401
+         TextToImageOutput,  # noqa: F401
+         TextToImageParameters,  # noqa: F401
+         TextToSpeechEarlyStoppingEnum,  # noqa: F401
+         TextToSpeechGenerationParameters,  # noqa: F401
+         TextToSpeechInput,  # noqa: F401
+         TextToSpeechOutput,  # noqa: F401
+         TextToSpeechParameters,  # noqa: F401
+         TextToVideoInput,  # noqa: F401
+         TextToVideoOutput,  # noqa: F401
+         TextToVideoParameters,  # noqa: F401
+         TokenClassificationAggregationStrategy,  # noqa: F401
+         TokenClassificationInput,  # noqa: F401
+         TokenClassificationOutputElement,  # noqa: F401
+         TokenClassificationParameters,  # noqa: F401
+         TranslationInput,  # noqa: F401
+         TranslationOutput,  # noqa: F401
+         TranslationParameters,  # noqa: F401
+         TranslationTruncationStrategy,  # noqa: F401
+         TypeEnum,  # noqa: F401
+         VideoClassificationInput,  # noqa: F401
+         VideoClassificationOutputElement,  # noqa: F401
+         VideoClassificationOutputTransform,  # noqa: F401
+         VideoClassificationParameters,  # noqa: F401
+         VisualQuestionAnsweringInput,  # noqa: F401
+         VisualQuestionAnsweringInputData,  # noqa: F401
+         VisualQuestionAnsweringOutputElement,  # noqa: F401
+         VisualQuestionAnsweringParameters,  # noqa: F401
+         ZeroShotClassificationInput,  # noqa: F401
+         ZeroShotClassificationOutputElement,  # noqa: F401
+         ZeroShotClassificationParameters,  # noqa: F401
+         ZeroShotImageClassificationInput,  # noqa: F401
+         ZeroShotImageClassificationOutputElement,  # noqa: F401
+         ZeroShotImageClassificationParameters,  # noqa: F401
+         ZeroShotObjectDetectionBoundingBox,  # noqa: F401
+         ZeroShotObjectDetectionInput,  # noqa: F401
+         ZeroShotObjectDetectionOutputElement,  # noqa: F401
+         ZeroShotObjectDetectionParameters,  # noqa: F401
+     )
+     from .inference._mcp.agent import Agent  # noqa: F401
+     from .inference._mcp.mcp_client import MCPClient  # noqa: F401
+     from .inference_api import InferenceApi  # noqa: F401
+     from .keras_mixin import (
+         KerasModelHubMixin,  # noqa: F401
+         from_pretrained_keras,  # noqa: F401
+         push_to_hub_keras,  # noqa: F401
+         save_pretrained_keras,  # noqa: F401
+     )
+     from .repocard import (
+         DatasetCard,  # noqa: F401
+         ModelCard,  # noqa: F401
+         RepoCard,  # noqa: F401
+         SpaceCard,  # noqa: F401
+         metadata_eval_result,  # noqa: F401
+         metadata_load,  # noqa: F401
+         metadata_save,  # noqa: F401
+         metadata_update,  # noqa: F401
+     )
+     from .repocard_data import (
+         CardData,  # noqa: F401
+         DatasetCardData,  # noqa: F401
+         EvalResult,  # noqa: F401
+         ModelCardData,  # noqa: F401
+         SpaceCardData,  # noqa: F401
+     )
+     from .repository import Repository  # noqa: F401
+     from .serialization import (
+         StateDictSplit,  # noqa: F401
+         get_tf_storage_size,  # noqa: F401
+         get_torch_storage_id,  # noqa: F401
+         get_torch_storage_size,  # noqa: F401
+         load_state_dict_from_file,  # noqa: F401
+         load_torch_model,  # noqa: F401
+         save_torch_model,  # noqa: F401
+         save_torch_state_dict,  # noqa: F401
+         split_state_dict_into_shards_factory,  # noqa: F401
+         split_tf_state_dict_into_shards,  # noqa: F401
+         split_torch_state_dict_into_shards,  # noqa: F401
+     )
+     from .serialization._dduf import (
+         DDUFEntry,  # noqa: F401
+         export_entries_as_dduf,  # noqa: F401
+         export_folder_as_dduf,  # noqa: F401
+         read_dduf_file,  # noqa: F401
+     )
+     from .utils import (
+         CachedFileInfo,  # noqa: F401
+         CachedRepoInfo,  # noqa: F401
+         CachedRevisionInfo,  # noqa: F401
+         CacheNotFound,  # noqa: F401
+         CorruptedCacheException,  # noqa: F401
+         DeleteCacheStrategy,  # noqa: F401
+         HFCacheInfo,  # noqa: F401
+         HfFolder,  # noqa: F401
+         cached_assets_path,  # noqa: F401
+         configure_http_backend,  # noqa: F401
+         dump_environment_info,  # noqa: F401
+         get_session,  # noqa: F401
+         get_token,  # noqa: F401
+         logging,  # noqa: F401
+         scan_cache_dir,  # noqa: F401
+     )
.venv/lib/python3.13/site-packages/huggingface_hub/_commit_api.py ADDED
@@ -0,0 +1,915 @@
+ """
+ Type definitions and utilities for the `create_commit` API
+ """
+ 
+ import base64
+ import io
+ import math
+ import os
+ import warnings
+ from collections import defaultdict
+ from contextlib import contextmanager
+ from dataclasses import dataclass, field
+ from itertools import groupby
+ from pathlib import Path, PurePosixPath
+ from typing import TYPE_CHECKING, Any, BinaryIO, Dict, Iterable, Iterator, List, Literal, Optional, Tuple, Union
+ 
+ from tqdm.contrib.concurrent import thread_map
+ 
+ from . import constants
+ from .errors import EntryNotFoundError, HfHubHTTPError, XetAuthorizationError, XetRefreshTokenError
+ from .file_download import hf_hub_url
+ from .lfs import UploadInfo, lfs_upload, post_lfs_batch_info
+ from .utils import (
+     FORBIDDEN_FOLDERS,
+     XetTokenType,
+     chunk_iterable,
+     fetch_xet_connection_info_from_repo_info,
+     get_session,
+     hf_raise_for_status,
+     logging,
+     sha,
+     tqdm_stream_file,
+     validate_hf_hub_args,
+ )
+ from .utils import tqdm as hf_tqdm
+ from .utils.tqdm import _get_progress_bar_context
+ 
+ 
+ if TYPE_CHECKING:
+     from .hf_api import RepoFile
+ 
+ 
+ logger = logging.get_logger(__name__)
+ 
+ 
+ UploadMode = Literal["lfs", "regular"]
+ 
+ # Max is 1,000 per request on the Hub for HfApi.get_paths_info
+ # Otherwise we get:
+ # HfHubHTTPError: 413 Client Error: Payload Too Large for url: https://huggingface.co/api/datasets/xxx (Request ID: xxx)\n\ntoo many parameters
+ # See https://github.com/huggingface/huggingface_hub/issues/1503
+ FETCH_LFS_BATCH_SIZE = 500
+ 
+ UPLOAD_BATCH_MAX_NUM_FILES = 256
+ 
+ 
+ @dataclass
+ class CommitOperationDelete:
+     """
+     Data structure holding necessary info to delete a file or a folder from a repository
+     on the Hub.
+ 
+     Args:
+         path_in_repo (`str`):
+             Relative filepath in the repo, for example: `"checkpoints/1fec34a/weights.bin"`
+             for a file or `"checkpoints/1fec34a/"` for a folder.
+         is_folder (`bool` or `Literal["auto"]`, *optional*):
+             Whether the Delete Operation applies to a folder or not. If "auto", the path
+             type (file or folder) is guessed automatically by checking whether the path
+             ends with a "/" (folder) or not (file). To explicitly set the path type, you
+             can set `is_folder=True` or `is_folder=False`.
+     """
+ 
+     path_in_repo: str
+     is_folder: Union[bool, Literal["auto"]] = "auto"
+ 
+     def __post_init__(self):
+         self.path_in_repo = _validate_path_in_repo(self.path_in_repo)
+ 
+         if self.is_folder == "auto":
+             self.is_folder = self.path_in_repo.endswith("/")
+         if not isinstance(self.is_folder, bool):
+             raise ValueError(
+                 f"Wrong value for `is_folder`. Must be one of [`True`, `False`, `'auto'`]. Got '{self.is_folder}'."
+             )
+ 
+ 
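For illustration, a short usage sketch of the "auto" folder detection (paths are hypothetical):

```python
from huggingface_hub import CommitOperationDelete

# A trailing "/" marks a folder when is_folder="auto" (the default).
delete_file = CommitOperationDelete(path_in_repo="checkpoints/old-weights.bin")
delete_dir = CommitOperationDelete(path_in_repo="checkpoints/1fec34a/")
assert delete_file.is_folder is False
assert delete_dir.is_folder is True
```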
+ @dataclass
+ class CommitOperationCopy:
+     """
+     Data structure holding necessary info to copy a file in a repository on the Hub.
+ 
+     Limitations:
+       - Only LFS files can be copied. To copy a regular file, you need to download it locally and re-upload it.
+       - Cross-repository copies are not supported.
+ 
+     Note: you can combine a [`CommitOperationCopy`] and a [`CommitOperationDelete`] to rename an LFS file on the Hub
+     (see the sketch after this class).
+ 
+     Args:
+         src_path_in_repo (`str`):
+             Relative filepath in the repo of the file to be copied, e.g. `"checkpoints/1fec34a/weights.bin"`.
+         path_in_repo (`str`):
+             Relative filepath in the repo where to copy the file, e.g. `"checkpoints/1fec34a/weights_copy.bin"`.
+         src_revision (`str`, *optional*):
+             The git revision of the file to be copied. Can be any valid git revision.
+             Defaults to the target commit revision.
+     """
+ 
+     src_path_in_repo: str
+     path_in_repo: str
+     src_revision: Optional[str] = None
+     # set to the OID of the file to be copied if it has already been uploaded
+     # useful to determine if a commit will be empty or not
+     _src_oid: Optional[str] = None
+     # set to the OID of the file to copy to if it has already been uploaded
+     # useful to determine if a commit will be empty or not
+     _dest_oid: Optional[str] = None
+ 
+     def __post_init__(self):
+         self.src_path_in_repo = _validate_path_in_repo(self.src_path_in_repo)
+         self.path_in_repo = _validate_path_in_repo(self.path_in_repo)
+ 
+ 
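A minimal sketch of the rename pattern mentioned in the note above, assuming a hypothetical repo `user/repo` with an LFS file `weights.bin`:

```python
from huggingface_hub import CommitOperationCopy, CommitOperationDelete, HfApi

api = HfApi()
api.create_commit(
    repo_id="user/repo",  # hypothetical
    operations=[
        CommitOperationCopy(src_path_in_repo="weights.bin", path_in_repo="model.bin"),
        CommitOperationDelete(path_in_repo="weights.bin"),
    ],
    commit_message="Rename weights.bin to model.bin (LFS copy + delete)",
)
```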
+ @dataclass
+ class CommitOperationAdd:
+     """
+     Data structure holding necessary info to upload a file to a repository on the Hub.
+ 
+     Args:
+         path_in_repo (`str`):
+             Relative filepath in the repo, for example: `"checkpoints/1fec34a/weights.bin"`
+         path_or_fileobj (`str`, `Path`, `bytes`, or `BinaryIO`):
+             Either:
+             - a path to a local file (as `str` or `pathlib.Path`) to upload
+             - a buffer of bytes (`bytes`) holding the content of the file to upload
+             - a "file object" (subclass of `io.BufferedIOBase`), typically obtained
+                 with `open(path, "rb")`. It must support `seek()` and `tell()` methods.
+ 
+     Raises:
+         [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
+             If `path_or_fileobj` is not one of `str`, `Path`, `bytes` or `io.BufferedIOBase`.
+         [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
+             If `path_or_fileobj` is a `str` or `Path` but not a path to an existing file.
+         [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
+             If `path_or_fileobj` is an `io.BufferedIOBase` but it doesn't support both
+             `seek()` and `tell()`.
+     """
+ 
+     path_in_repo: str
+     path_or_fileobj: Union[str, Path, bytes, BinaryIO]
+     upload_info: UploadInfo = field(init=False, repr=False)
+ 
+     # Internal attributes
+ 
+     # set to "lfs" or "regular" once known
+     _upload_mode: Optional[UploadMode] = field(init=False, repr=False, default=None)
+ 
+     # set to True if .gitignore rules prevent the file from being uploaded as LFS
+     # (server-side check)
+     _should_ignore: Optional[bool] = field(init=False, repr=False, default=None)
+ 
+     # set to the remote OID of the file if it has already been uploaded
+     # useful to determine if a commit will be empty or not
+     _remote_oid: Optional[str] = field(init=False, repr=False, default=None)
+ 
+     # set to True once the file has been uploaded as LFS
+     _is_uploaded: bool = field(init=False, repr=False, default=False)
+ 
+     # set to True once the file has been committed
+     _is_committed: bool = field(init=False, repr=False, default=False)
+ 
+     def __post_init__(self) -> None:
+         """Validates `path_or_fileobj` and computes `upload_info`."""
+         self.path_in_repo = _validate_path_in_repo(self.path_in_repo)
+ 
+         # Validate `path_or_fileobj` value
+         if isinstance(self.path_or_fileobj, Path):
+             self.path_or_fileobj = str(self.path_or_fileobj)
+         if isinstance(self.path_or_fileobj, str):
+             path_or_fileobj = os.path.normpath(os.path.expanduser(self.path_or_fileobj))
+             if not os.path.isfile(path_or_fileobj):
+                 raise ValueError(f"Provided path: '{path_or_fileobj}' is not a file on the local file system")
+         elif not isinstance(self.path_or_fileobj, (io.BufferedIOBase, bytes)):
+             # ^^ Inspired from: https://stackoverflow.com/questions/44584829/how-to-determine-if-file-is-opened-in-binary-or-text-mode
+             raise ValueError(
+                 "path_or_fileobj must be either an instance of str, bytes or"
+                 " io.BufferedIOBase. If you passed a file-like object, make sure it is"
+                 " in binary mode."
+             )
+         if isinstance(self.path_or_fileobj, io.BufferedIOBase):
+             try:
+                 self.path_or_fileobj.tell()
+                 self.path_or_fileobj.seek(0, os.SEEK_CUR)
+             except (OSError, AttributeError) as exc:
+                 raise ValueError(
+                     "path_or_fileobj is a file-like object but does not implement seek() and tell()"
+                 ) from exc
+ 
+         # Compute "upload_info" attribute
+         if isinstance(self.path_or_fileobj, str):
+             self.upload_info = UploadInfo.from_path(self.path_or_fileobj)
+         elif isinstance(self.path_or_fileobj, bytes):
+             self.upload_info = UploadInfo.from_bytes(self.path_or_fileobj)
+         else:
+             self.upload_info = UploadInfo.from_fileobj(self.path_or_fileobj)
+ 
+     @contextmanager
+     def as_file(self, with_tqdm: bool = False) -> Iterator[BinaryIO]:
+         """
+         A context manager that yields a file-like object that allows reading the underlying
+         data behind `path_or_fileobj`.
+ 
+         Args:
+             with_tqdm (`bool`, *optional*, defaults to `False`):
+                 If True, iterating over the file object will display a progress bar. Only
+                 works if the file-like object is a path to a file. Pure bytes and buffers
+                 are not supported.
+ 
+         Example:
+ 
+         ```python
+         >>> operation = CommitOperationAdd(
+         ...     path_in_repo="remote/dir/weights.h5",
+         ...     path_or_fileobj="./local/weights.h5",
+         ... )
+         CommitOperationAdd(path_in_repo='remote/dir/weights.h5', path_or_fileobj='./local/weights.h5')
+ 
+         >>> with operation.as_file() as file:
+         ...     content = file.read()
+ 
+         >>> with operation.as_file(with_tqdm=True) as file:
+         ...     while True:
+         ...         data = file.read(1024)
+         ...         if not data:
+         ...             break
+         config.json: 100%|█████████████████████████| 8.19k/8.19k [00:02<00:00, 3.72kB/s]
+ 
+         >>> with operation.as_file(with_tqdm=True) as file:
+         ...     requests.put(..., data=file)
+         config.json: 100%|█████████████████████████| 8.19k/8.19k [00:02<00:00, 3.72kB/s]
+         ```
+         """
+         if isinstance(self.path_or_fileobj, str) or isinstance(self.path_or_fileobj, Path):
+             if with_tqdm:
+                 with tqdm_stream_file(self.path_or_fileobj) as file:
+                     yield file
+             else:
+                 with open(self.path_or_fileobj, "rb") as file:
+                     yield file
+         elif isinstance(self.path_or_fileobj, bytes):
+             yield io.BytesIO(self.path_or_fileobj)
+         elif isinstance(self.path_or_fileobj, io.BufferedIOBase):
+             prev_pos = self.path_or_fileobj.tell()
+             yield self.path_or_fileobj
+             self.path_or_fileobj.seek(prev_pos, io.SEEK_SET)
+ 
+     def b64content(self) -> bytes:
+         """
+         The base64-encoded content of `path_or_fileobj`
+ 
+         Returns: `bytes`
+         """
+         with self.as_file() as file:
+             return base64.b64encode(file.read())
+ 
+     @property
+     def _local_oid(self) -> Optional[str]:
+         """Return the OID of the local file.
+ 
+         This OID is then compared to `self._remote_oid` to check if the file has changed compared to the remote one.
+         If the file did not change, we won't upload it again to prevent empty commits.
+ 
+         For LFS files, the OID corresponds to the SHA256 of the file content (used as the LFS ref).
+         For regular files, the OID corresponds to the SHA1 of the file content (the standard git blob
+         hash; a sketch follows this class).
+         Note: this is slightly different from git OID computation since the OID of an LFS file is usually the git-SHA1 of the
+         pointer file content (not the actual file content). However, using the SHA256 is enough to detect changes
+         and more convenient client-side.
+         """
+         if self._upload_mode is None:
+             return None
+         elif self._upload_mode == "lfs":
+             return self.upload_info.sha256.hex()
+         else:
+             # Regular file => compute sha1
+             # => no need to read by chunk since the file is guaranteed to be <=5MB.
+             with self.as_file() as file:
+                 return sha.git_hash(file.read())
+ 
+ 
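For reference, the regular-file OID above is the standard git blob hash. A minimal sketch of what a helper like `sha.git_hash` is expected to compute (this reimplementation is illustrative, not the library's code):

```python
import hashlib

def git_blob_sha1(content: bytes) -> str:
    # git hashes a blob as sha1(b"blob <size>\0" + content)
    header = f"blob {len(content)}".encode() + b"\0"
    return hashlib.sha1(header + content).hexdigest()

# An empty file always hashes to the well-known empty-blob OID.
assert git_blob_sha1(b"") == "e69de29bb2d1d6434b8b29ae775ad8c2e48c5391"
```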
+ def _validate_path_in_repo(path_in_repo: str) -> str:
+     # Validate `path_in_repo` value to prevent a server-side issue
+     if path_in_repo.startswith("/"):
+         path_in_repo = path_in_repo[1:]
+     if path_in_repo == "." or path_in_repo == ".." or path_in_repo.startswith("../"):
+         raise ValueError(f"Invalid `path_in_repo` in CommitOperation: '{path_in_repo}'")
+     if path_in_repo.startswith("./"):
+         path_in_repo = path_in_repo[2:]
+     for forbidden in FORBIDDEN_FOLDERS:
+         if any(part == forbidden for part in path_in_repo.split("/")):
+             raise ValueError(
+                 f"Invalid `path_in_repo` in CommitOperation: cannot update files under a '{forbidden}/' folder (path:"
+                 f" '{path_in_repo}')."
+             )
+     return path_in_repo
+ 
+ 
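A few concrete input/output pairs for the validator above (assuming `.git` is among `FORBIDDEN_FOLDERS`, as in the upstream library):

```python
assert _validate_path_in_repo("/weights.bin") == "weights.bin"   # leading "/" stripped
assert _validate_path_in_repo("./weights.bin") == "weights.bin"  # leading "./" stripped

for bad in (".", "..", "../weights.bin", ".git/config"):
    try:
        _validate_path_in_repo(bad)
    except ValueError:
        pass  # expected: escapes the repo root or touches a forbidden folder
    else:
        raise AssertionError(f"{bad!r} should have been rejected")
```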
+ CommitOperation = Union[CommitOperationAdd, CommitOperationCopy, CommitOperationDelete]
+ 
+ 
+ def _warn_on_overwriting_operations(operations: List[CommitOperation]) -> None:
+     """
+     Warn user when a list of operations is expected to overwrite itself in a single
+     commit.
+ 
+     Rules:
+     - If a filepath is updated by multiple `CommitOperationAdd` operations, a warning
+       message is triggered (see the example after this function).
+     - If a filepath is updated at least once by a `CommitOperationAdd` and then deleted
+       by a `CommitOperationDelete`, a warning is triggered.
+     - If a `CommitOperationDelete` deletes a filepath that is then updated by a
+       `CommitOperationAdd`, no warning is triggered. This is usually useless (no need to
+       delete before upload) but can happen if a user deletes an entire folder and then
+       adds new files to it.
+     """
+     nb_additions_per_path: Dict[str, int] = defaultdict(int)
+     for operation in operations:
+         path_in_repo = operation.path_in_repo
+         if isinstance(operation, CommitOperationAdd):
+             if nb_additions_per_path[path_in_repo] > 0:
+                 warnings.warn(
+                     "About to update multiple times the same file in the same commit:"
+                     f" '{path_in_repo}'. This can cause undesired inconsistencies in"
+                     " your repo."
+                 )
+             nb_additions_per_path[path_in_repo] += 1
+             for parent in PurePosixPath(path_in_repo).parents:
+                 # Also keep track of number of updated files per folder
+                 # => warns if deleting a folder overwrites some contained files
+                 nb_additions_per_path[str(parent)] += 1
+         if isinstance(operation, CommitOperationDelete):
+             if nb_additions_per_path[str(PurePosixPath(path_in_repo))] > 0:
+                 if operation.is_folder:
+                     warnings.warn(
+                         "About to delete a folder containing files that have just been"
+                         f" updated within the same commit: '{path_in_repo}'. This can"
+                         " cause undesired inconsistencies in your repo."
+                     )
+                 else:
+                     warnings.warn(
+                         "About to delete a file that has just been updated within the"
+                         f" same commit: '{path_in_repo}'. This can cause undesired"
+                         " inconsistencies in your repo."
+                     )
+ 
+ 
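A quick sketch of an operation list that triggers the first rule above (file contents are placeholder bytes):

```python
import warnings

ops = [
    CommitOperationAdd(path_in_repo="README.md", path_or_fileobj=b"v1"),
    CommitOperationAdd(path_in_repo="README.md", path_or_fileobj=b"v2"),  # same path twice
]
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    _warn_on_overwriting_operations(ops)
assert any("update multiple times" in str(w.message) for w in caught)
```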
+ @validate_hf_hub_args
+ def _upload_lfs_files(
+     *,
+     additions: List[CommitOperationAdd],
+     repo_type: str,
+     repo_id: str,
+     headers: Dict[str, str],
+     endpoint: Optional[str] = None,
+     num_threads: int = 5,
+     revision: Optional[str] = None,
+ ):
+     """
+     Uploads the content of `additions` to the Hub using the large file storage protocol.
+ 
+     Relevant external documentation:
+         - LFS Batch API: https://github.com/git-lfs/git-lfs/blob/main/docs/api/batch.md
+           (a request sketch follows this function)
+ 
+     Args:
+         additions (`List` of `CommitOperationAdd`):
+             The files to be uploaded.
+         repo_type (`str`):
+             Type of the repo to upload to: `"model"`, `"dataset"` or `"space"`.
+         repo_id (`str`):
+             A namespace (user or an organization) and a repo name separated
+             by a `/`.
+         headers (`Dict[str, str]`):
+             Headers to use for the request, including authorization headers and user agent.
+         num_threads (`int`, *optional*):
+             The number of concurrent threads to use when uploading. Defaults to 5.
+         revision (`str`, *optional*):
+             The git revision to upload to.
+ 
+     Raises:
+         [`EnvironmentError`](https://docs.python.org/3/library/exceptions.html#EnvironmentError)
+             If an upload failed for any reason.
+         [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
+             If the server returns malformed responses.
+         [`HTTPError`](https://requests.readthedocs.io/en/latest/api/#requests.HTTPError)
+             If the LFS batch endpoint returned an HTTP error.
+     """
+     # Step 1: retrieve upload instructions from the LFS batch endpoint.
+     # Upload instructions are retrieved in chunks of 256 files to avoid reaching
+     # the payload limit.
+     batch_actions: List[Dict] = []
+     for chunk in chunk_iterable(additions, chunk_size=UPLOAD_BATCH_MAX_NUM_FILES):
+         batch_actions_chunk, batch_errors_chunk = post_lfs_batch_info(
+             upload_infos=[op.upload_info for op in chunk],
+             repo_id=repo_id,
+             repo_type=repo_type,
+             revision=revision,
+             endpoint=endpoint,
+             headers=headers,
+             token=None,  # already passed in 'headers'
+         )
+ 
+         # If at least 1 error, we do not retrieve information for other chunks
+         if batch_errors_chunk:
+             message = "\n".join(
+                 [
+                     f"Encountered error for file with OID {err.get('oid')}: `{err.get('error', {}).get('message')}`"
+                     for err in batch_errors_chunk
+                 ]
+             )
+             raise ValueError(f"LFS batch endpoint returned errors:\n{message}")
+ 
+         batch_actions += batch_actions_chunk
+     oid2addop = {add_op.upload_info.sha256.hex(): add_op for add_op in additions}
+ 
+     # Step 2: ignore files that have already been uploaded
+     filtered_actions = []
+     for action in batch_actions:
+         if action.get("actions") is None:
+             logger.debug(
+                 f"Content of file {oid2addop[action['oid']].path_in_repo} is already"
+                 " present upstream - skipping upload."
+             )
+         else:
+             filtered_actions.append(action)
+ 
+     if len(filtered_actions) == 0:
+         logger.debug("No LFS files to upload.")
+         return
+ 
+     # Step 3: upload files concurrently according to these instructions
+     def _wrapped_lfs_upload(batch_action) -> None:
+         try:
+             operation = oid2addop[batch_action["oid"]]
+             lfs_upload(operation=operation, lfs_batch_action=batch_action, headers=headers, endpoint=endpoint)
+         except Exception as exc:
+             raise RuntimeError(f"Error while uploading '{operation.path_in_repo}' to the Hub.") from exc
+ 
+     if constants.HF_HUB_ENABLE_HF_TRANSFER:
+         logger.debug(f"Uploading {len(filtered_actions)} LFS files to the Hub using `hf_transfer`.")
+         for action in hf_tqdm(filtered_actions, name="huggingface_hub.lfs_upload"):
+             _wrapped_lfs_upload(action)
+     elif len(filtered_actions) == 1:
+         logger.debug("Uploading 1 LFS file to the Hub")
+         _wrapped_lfs_upload(filtered_actions[0])
+     else:
+         logger.debug(
+             f"Uploading {len(filtered_actions)} LFS files to the Hub using up to {num_threads} threads concurrently"
+         )
+         thread_map(
+             _wrapped_lfs_upload,
+             filtered_actions,
+             desc=f"Upload {len(filtered_actions)} LFS files",
+             max_workers=num_threads,
+             tqdm_class=hf_tqdm,
+         )
+ 
+ 
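The batch call in Step 1 follows the public git-lfs Batch API linked in the docstring. For orientation, the request body has roughly this shape (a sketch based on that spec, not on `post_lfs_batch_info` internals):

```python
# One "objects" entry per file; the oid is the SHA256 of the file content.
lfs_batch_request = {
    "operation": "upload",
    "transfers": ["basic", "multipart"],  # transfer adapters the client supports
    "objects": [
        {"oid": "<sha256-of-file-content>", "size": 12345},
    ],
}
# Per object, the server replies either with no "actions" (content already
# stored, so Step 2 skips it) or with an upload action carrying the URL and
# headers that `lfs_upload` then uses.
```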
+ @validate_hf_hub_args
+ def _upload_xet_files(
+     *,
+     additions: List[CommitOperationAdd],
+     repo_type: str,
+     repo_id: str,
+     headers: Dict[str, str],
+     endpoint: Optional[str] = None,
+     revision: Optional[str] = None,
+     create_pr: Optional[bool] = None,
+ ):
+     """
+     Uploads the content of `additions` to the Hub using the xet storage protocol.
+     This chunks the files and deduplicates the chunks before uploading them to xetcas storage.
+ 
+     Args:
+         additions (`List` of `CommitOperationAdd`):
+             The files to be uploaded.
+         repo_type (`str`):
+             Type of the repo to upload to: `"model"`, `"dataset"` or `"space"`.
+         repo_id (`str`):
+             A namespace (user or an organization) and a repo name separated
+             by a `/`.
+         headers (`Dict[str, str]`):
+             Headers to use for the request, including authorization headers and user agent.
+         endpoint (`str`, *optional*):
+             The endpoint to use for the xetcas service. Defaults to `constants.ENDPOINT`.
+         revision (`str`, *optional*):
+             The git revision to upload to.
+         create_pr (`bool`, *optional*):
+             Whether or not to create a Pull Request with that commit.
+ 
+     Raises:
+         [`EnvironmentError`](https://docs.python.org/3/library/exceptions.html#EnvironmentError)
+             If an upload failed for any reason.
+         [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
+             If the server returns malformed responses or if the user is unauthorized to upload to xet storage.
+         [`HTTPError`](https://requests.readthedocs.io/en/latest/api/#requests.HTTPError)
+             If the LFS batch endpoint returned an HTTP error.
+ 
+     **How it works:**
+     The file upload system uses Xet storage, which is a content-addressable storage system that breaks files into chunks
+     for efficient storage and transfer.
+ 
+     `hf_xet.upload_files` manages uploading files by:
+     - Taking a list of file paths to upload
+     - Breaking files into smaller chunks for efficient storage
+     - Avoiding duplicate storage by recognizing identical chunks across files
+     - Connecting to a storage server (CAS server) that manages these chunks
+ 
+     The upload process works like this:
+     1. Create a local folder at ~/.cache/huggingface/xet/chunk-cache to store file chunks for reuse.
+     2. Process files in parallel (up to 8 files at once):
+         2.1. Read the file content.
+         2.2. Split the file content into smaller chunks based on content patterns: each chunk gets a unique ID based on what's in it.
+         2.3. For each chunk:
+             - Check if it already exists in storage.
+             - Skip uploading chunks that already exist.
+         2.4. Group chunks into larger blocks for efficient transfer.
+         2.5. Upload these blocks to the storage server.
+         2.6. Create and upload information about how the file is structured.
+     3. Return reference files that contain information about the uploaded files, which can be used later to download them.
+     """
+     if len(additions) == 0:
+         return
+     # at this point, we know that hf_xet is installed
+     from hf_xet import upload_bytes, upload_files
+ 
+     try:
+         xet_connection_info = fetch_xet_connection_info_from_repo_info(
+             token_type=XetTokenType.WRITE,
+             repo_id=repo_id,
+             repo_type=repo_type,
+             revision=revision,
+             headers=headers,
+             endpoint=endpoint,
+             params={"create_pr": "1"} if create_pr else None,
+         )
+     except HfHubHTTPError as e:
+         if e.response.status_code == 401:
+             raise XetAuthorizationError(
+                 f"You are unauthorized to upload to xet storage for {repo_type}/{repo_id}. "
+                 f"Please check that you have configured your access token with write access to the repo."
+             ) from e
+         raise
+ 
+     xet_endpoint = xet_connection_info.endpoint
+     access_token_info = (xet_connection_info.access_token, xet_connection_info.expiration_unix_epoch)
+ 
+     def token_refresher() -> Tuple[str, int]:
+         new_xet_connection = fetch_xet_connection_info_from_repo_info(
+             token_type=XetTokenType.WRITE,
+             repo_id=repo_id,
+             repo_type=repo_type,
+             revision=revision,
+             headers=headers,
+             endpoint=endpoint,
+             params={"create_pr": "1"} if create_pr else None,
+         )
+         if new_xet_connection is None:
+             raise XetRefreshTokenError("Failed to refresh xet token")
+         return new_xet_connection.access_token, new_xet_connection.expiration_unix_epoch
+ 
+     num_chunks = math.ceil(len(additions) / UPLOAD_BATCH_MAX_NUM_FILES)
+     num_chunks_num_digits = int(math.log10(num_chunks)) + 1
+     for i, chunk in enumerate(chunk_iterable(additions, chunk_size=UPLOAD_BATCH_MAX_NUM_FILES)):
+         _chunk = [op for op in chunk]
+ 
+         bytes_ops = [op for op in _chunk if isinstance(op.path_or_fileobj, bytes)]
+         paths_ops = [op for op in _chunk if isinstance(op.path_or_fileobj, (str, Path))]
+         expected_size = sum(op.upload_info.size for op in bytes_ops + paths_ops)
+ 
+         if num_chunks > 1:
+             description = f"Uploading Batch [{str(i + 1).zfill(num_chunks_num_digits)}/{num_chunks}]..."
+         else:
+             description = "Uploading..."
+         progress_cm = _get_progress_bar_context(
+             desc=description,
+             total=expected_size,
+             initial=0,
+             unit="B",
+             unit_scale=True,
+             name="huggingface_hub.xet_put",
+             log_level=logger.getEffectiveLevel(),
+         )
+         with progress_cm as progress:
+ 
+             def update_progress(increment: int):
+                 progress.update(increment)
+ 
+             if len(paths_ops) > 0:
+                 upload_files(
+                     [str(op.path_or_fileobj) for op in paths_ops],
+                     xet_endpoint,
+                     access_token_info,
+                     token_refresher,
+                     update_progress,
+                     repo_type,
+                 )
+             if len(bytes_ops) > 0:
+                 upload_bytes(
+                     [op.path_or_fileobj for op in bytes_ops],
+                     xet_endpoint,
+                     access_token_info,
+                     token_refresher,
+                     update_progress,
+                     repo_type,
+                 )
+     return
+ 
+ 
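A tiny worked example of the batching arithmetic above: with 600 additions and `UPLOAD_BATCH_MAX_NUM_FILES = 256`, the progress descriptions come out as follows.

```python
import math

num_additions = 600  # hypothetical
num_chunks = math.ceil(num_additions / 256)  # -> 3
width = int(math.log10(num_chunks)) + 1      # -> 1 digit for zero-padding
labels = [f"Uploading Batch [{str(i + 1).zfill(width)}/{num_chunks}]..." for i in range(num_chunks)]
# labels == ['Uploading Batch [1/3]...', 'Uploading Batch [2/3]...', 'Uploading Batch [3/3]...']
```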
+ def _validate_preupload_info(preupload_info: dict):
+     files = preupload_info.get("files")
+     if not isinstance(files, list):
+         raise ValueError("preupload_info is improperly formatted")
+     for file_info in files:
+         if not (
+             isinstance(file_info, dict)
+             and isinstance(file_info.get("path"), str)
+             and isinstance(file_info.get("uploadMode"), str)
+             and (file_info["uploadMode"] in ("lfs", "regular"))
+         ):
+             raise ValueError("preupload_info is improperly formatted")
+     return preupload_info
+ 
+ 
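For reference, the smallest response shape that passes this validator (values are placeholders):

```python
ok = _validate_preupload_info({"files": [{"path": "weights.bin", "uploadMode": "lfs"}]})
# Extra keys such as "shouldIgnore" or "oid" are not checked here; they are
# consumed by `_fetch_upload_modes` below.
```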
+ @validate_hf_hub_args
+ def _fetch_upload_modes(
+     additions: Iterable[CommitOperationAdd],
+     repo_type: str,
+     repo_id: str,
+     headers: Dict[str, str],
+     revision: str,
+     endpoint: Optional[str] = None,
+     create_pr: bool = False,
+     gitignore_content: Optional[str] = None,
+ ) -> None:
+     """
+     Requests the Hub "preupload" endpoint to determine whether each input file should be uploaded as a regular git blob,
+     as a git LFS blob, or as a XET file. Input `additions` are mutated in-place with the upload mode.
+ 
+     Args:
+         additions (`Iterable` of :class:`CommitOperationAdd`):
+             Iterable of :class:`CommitOperationAdd` describing the files to
+             upload to the Hub.
+         repo_type (`str`):
+             Type of the repo to upload to: `"model"`, `"dataset"` or `"space"`.
+         repo_id (`str`):
+             A namespace (user or an organization) and a repo name separated
+             by a `/`.
+         headers (`Dict[str, str]`):
+             Headers to use for the request, including authorization headers and user agent.
+         revision (`str`):
+             The git revision to upload the files to. Can be any valid git revision.
+         gitignore_content (`str`, *optional*):
+             The content of the `.gitignore` file to know which files should be ignored. The order of priority
+             is to first check if `gitignore_content` is passed, then check if the `.gitignore` file is present
+             in the list of files to commit and finally default to the `.gitignore` file already hosted on the Hub
+             (if any).
+ 
+     Raises:
+         [`~utils.HfHubHTTPError`]
+             If the Hub API returned an error.
+         [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
+             If the Hub API response is improperly formatted.
+     """
+     endpoint = endpoint if endpoint is not None else constants.ENDPOINT
+ 
+     # Fetch upload mode (LFS or regular) chunk by chunk.
+     upload_modes: Dict[str, UploadMode] = {}
+     should_ignore_info: Dict[str, bool] = {}
+     oid_info: Dict[str, Optional[str]] = {}
+ 
+     for chunk in chunk_iterable(additions, 256):
+         payload: Dict = {
+             "files": [
+                 {
+                     "path": op.path_in_repo,
+                     "sample": base64.b64encode(op.upload_info.sample).decode("ascii"),
+                     "size": op.upload_info.size,
+                 }
+                 for op in chunk
+             ]
+         }
+         if gitignore_content is not None:
+             payload["gitIgnore"] = gitignore_content
+ 
+         resp = get_session().post(
+             f"{endpoint}/api/{repo_type}s/{repo_id}/preupload/{revision}",
+             json=payload,
+             headers=headers,
+             params={"create_pr": "1"} if create_pr else None,
+         )
+         hf_raise_for_status(resp)
+         preupload_info = _validate_preupload_info(resp.json())
+         upload_modes.update(**{file["path"]: file["uploadMode"] for file in preupload_info["files"]})
+         should_ignore_info.update(**{file["path"]: file["shouldIgnore"] for file in preupload_info["files"]})
+         oid_info.update(**{file["path"]: file.get("oid") for file in preupload_info["files"]})
+ 
+     # Set upload mode for each addition operation
+     for addition in additions:
+         addition._upload_mode = upload_modes[addition.path_in_repo]
+         addition._should_ignore = should_ignore_info[addition.path_in_repo]
+         addition._remote_oid = oid_info[addition.path_in_repo]
+ 
+     # Empty files cannot be uploaded as LFS (S3 would fail with a 501 Not Implemented)
+     # => empty files are uploaded as "regular" to still allow users to commit them.
+     for addition in additions:
+         if addition.upload_info.size == 0:
+             addition._upload_mode = "regular"
+ 
+ 
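To make the request concrete, the JSON body sent to the preupload endpoint for a single small file would look roughly like this (a sketch; `sample` is the first bytes of the file, base64-encoded):

```python
import base64

content = b"hello world"
payload = {
    "files": [
        {
            "path": "data/hello.txt",  # hypothetical path_in_repo
            "sample": base64.b64encode(content).decode("ascii"),
            "size": len(content),
        }
    ]
}
# POSTed to {endpoint}/api/{repo_type}s/{repo_id}/preupload/{revision}; the
# server answers with one {"path", "uploadMode", "shouldIgnore", "oid"} entry
# per file, which the loop above folds into the CommitOperationAdd objects.
```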
+ @validate_hf_hub_args
719
+ def _fetch_files_to_copy(
720
+ copies: Iterable[CommitOperationCopy],
721
+ repo_type: str,
722
+ repo_id: str,
723
+ headers: Dict[str, str],
724
+ revision: str,
725
+ endpoint: Optional[str] = None,
726
+ ) -> Dict[Tuple[str, Optional[str]], Union["RepoFile", bytes]]:
727
+ """
728
+ Fetch information about the files to copy.
729
+
730
+ For LFS files, we only need their metadata (file size and sha256) while for regular files
731
+ we need to download the raw content from the Hub.
732
+
733
+ Args:
734
+ copies (`Iterable` of :class:`CommitOperationCopy`):
735
+ Iterable of :class:`CommitOperationCopy` describing the files to
736
+ copy on the Hub.
737
+ repo_type (`str`):
738
+ Type of the repo to upload to: `"model"`, `"dataset"` or `"space"`.
739
+ repo_id (`str`):
740
+ A namespace (user or an organization) and a repo name separated
741
+ by a `/`.
742
+ headers (`Dict[str, str]`):
743
+ Headers to use for the request, including authorization headers and user agent.
744
+ revision (`str`):
745
+ The git revision to upload the files to. Can be any valid git revision.
746
+
747
+ Returns: `Dict[Tuple[str, Optional[str]], Union[RepoFile, bytes]]`
748
+ Key is the file path and revision of the file to copy.
749
+ Value is the raw content as bytes (for regular files) or the file information as a RepoFile (for LFS files).
750
+
751
+ Raises:
752
+ [`~utils.HfHubHTTPError`]
753
+ If the Hub API returned an error.
754
+ [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
755
+ If the Hub API response is improperly formatted.
756
+ """
757
+ from .hf_api import HfApi, RepoFolder
758
+
759
+ hf_api = HfApi(endpoint=endpoint, headers=headers)
760
+ files_to_copy: Dict[Tuple[str, Optional[str]], Union["RepoFile", bytes]] = {}
761
+ # Store (path, revision) -> oid mapping
762
+ oid_info: Dict[Tuple[str, Optional[str]], Optional[str]] = {}
763
+ # 1. Fetch OIDs for destination paths in batches.
764
+ dest_paths = [op.path_in_repo for op in copies]
765
+ for offset in range(0, len(dest_paths), FETCH_LFS_BATCH_SIZE):
766
+ dest_repo_files = hf_api.get_paths_info(
767
+ repo_id=repo_id,
768
+ paths=dest_paths[offset : offset + FETCH_LFS_BATCH_SIZE],
769
+ revision=revision,
770
+ repo_type=repo_type,
771
+ )
772
+ for file in dest_repo_files:
773
+ if not isinstance(file, RepoFolder):
774
+ oid_info[(file.path, revision)] = file.blob_id
775
+
776
+ # 2. Group by source revision and fetch source file info in batches.
777
+ for src_revision, operations in groupby(copies, key=lambda op: op.src_revision):
778
+ operations = list(operations) # type: ignore
779
+ src_paths = [op.src_path_in_repo for op in operations]
780
+ for offset in range(0, len(src_paths), FETCH_LFS_BATCH_SIZE):
781
+ src_repo_files = hf_api.get_paths_info(
782
+ repo_id=repo_id,
783
+ paths=src_paths[offset : offset + FETCH_LFS_BATCH_SIZE],
784
+ revision=src_revision or revision,
785
+ repo_type=repo_type,
786
+ )
787
+
788
+ for src_repo_file in src_repo_files:
789
+ if isinstance(src_repo_file, RepoFolder):
790
+ raise NotImplementedError("Copying a folder is not implemented.")
791
+ oid_info[(src_repo_file.path, src_revision)] = src_repo_file.blob_id
792
+ # If it's an LFS file, store the RepoFile object. Otherwise, download raw bytes.
793
+ if src_repo_file.lfs:
794
+ files_to_copy[(src_repo_file.path, src_revision)] = src_repo_file
795
+ else:
796
+ # TODO: (optimization) download regular files to copy concurrently
797
+ url = hf_hub_url(
798
+ endpoint=endpoint,
799
+ repo_type=repo_type,
800
+ repo_id=repo_id,
801
+ revision=src_revision or revision,
802
+ filename=src_repo_file.path,
803
+ )
804
+ response = get_session().get(url, headers=headers)
805
+ hf_raise_for_status(response)
806
+ files_to_copy[(src_repo_file.path, src_revision)] = response.content
807
+ # 3. Ensure all operations found a corresponding file in the Hub
808
+ # and track src/dest OIDs for each operation.
809
+ for operation in operations:
810
+ if (operation.src_path_in_repo, src_revision) not in files_to_copy:
811
+ raise EntryNotFoundError(
812
+ f"Cannot copy {operation.src_path_in_repo} at revision "
813
+ f"{src_revision or revision}: file is missing on repo."
814
+ )
815
+ operation._src_oid = oid_info.get((operation.src_path_in_repo, operation.src_revision))
816
+ operation._dest_oid = oid_info.get((operation.path_in_repo, revision))
817
+ return files_to_copy
818
+
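
The per-revision grouping and fixed-size batching in `_fetch_files_to_copy` can be illustrated in isolation. Note that `itertools.groupby` only groups *consecutive* items sharing a key, so copies with the same `src_revision` are assumed to arrive adjacent; `BATCH_SIZE = 3` is a hypothetical stand-in for `FETCH_LFS_BATCH_SIZE`.

```python
from itertools import groupby

BATCH_SIZE = 3  # hypothetical stand-in for FETCH_LFS_BATCH_SIZE

# (src_path, src_revision) pairs standing in for CommitOperationCopy objects.
copies = [("a.bin", None), ("b.bin", None), ("c.bin", "dev"), ("d.bin", "dev")]

for src_revision, ops in groupby(copies, key=lambda op: op[1]):
    src_paths = [path for path, _ in ops]
    for offset in range(0, len(src_paths), BATCH_SIZE):
        batch = src_paths[offset : offset + BATCH_SIZE]
        print(src_revision, batch)  # one get_paths_info call per batch
```
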
819
+
820
+ def _prepare_commit_payload(
821
+ operations: Iterable[CommitOperation],
822
+ files_to_copy: Dict[Tuple[str, Optional[str]], Union["RepoFile", bytes]],
823
+ commit_message: str,
824
+ commit_description: Optional[str] = None,
825
+ parent_commit: Optional[str] = None,
826
+ ) -> Iterable[Dict[str, Any]]:
827
+ """
828
+ Builds the payload to POST to the `/commit` API of the Hub.
829
+
830
+ Payload is returned as an iterator so that it can be streamed as a ndjson in the
831
+ POST request.
832
+
833
+ For more information, see:
834
+ - https://github.com/huggingface/huggingface_hub/issues/1085#issuecomment-1265208073
835
+ - http://ndjson.org/
836
+ """
837
+ commit_description = commit_description if commit_description is not None else ""
838
+
839
+ # 1. Send a header item with the commit metadata
840
+ header_value = {"summary": commit_message, "description": commit_description}
841
+ if parent_commit is not None:
842
+ header_value["parentCommit"] = parent_commit
843
+ yield {"key": "header", "value": header_value}
844
+
845
+ nb_ignored_files = 0
846
+
847
+ # 2. Send operations, one per line
848
+ for operation in operations:
849
+ # Skip ignored files
850
+ if isinstance(operation, CommitOperationAdd) and operation._should_ignore:
851
+ logger.debug(f"Skipping file '{operation.path_in_repo}' in commit (ignored by gitignore file).")
852
+ nb_ignored_files += 1
853
+ continue
854
+
855
+ # 2.a. Case adding a regular file
856
+ if isinstance(operation, CommitOperationAdd) and operation._upload_mode == "regular":
857
+ yield {
858
+ "key": "file",
859
+ "value": {
860
+ "content": operation.b64content().decode(),
861
+ "path": operation.path_in_repo,
862
+ "encoding": "base64",
863
+ },
864
+ }
865
+ # 2.b. Case adding an LFS file
866
+ elif isinstance(operation, CommitOperationAdd) and operation._upload_mode == "lfs":
867
+ yield {
868
+ "key": "lfsFile",
869
+ "value": {
870
+ "path": operation.path_in_repo,
871
+ "algo": "sha256",
872
+ "oid": operation.upload_info.sha256.hex(),
873
+ "size": operation.upload_info.size,
874
+ },
875
+ }
876
+ # 2.c. Case deleting a file or folder
877
+ elif isinstance(operation, CommitOperationDelete):
878
+ yield {
879
+ "key": "deletedFolder" if operation.is_folder else "deletedFile",
880
+ "value": {"path": operation.path_in_repo},
881
+ }
882
+ # 2.d. Case copying a file or folder
883
+ elif isinstance(operation, CommitOperationCopy):
884
+ file_to_copy = files_to_copy[(operation.src_path_in_repo, operation.src_revision)]
885
+ if isinstance(file_to_copy, bytes):
886
+ yield {
887
+ "key": "file",
888
+ "value": {
889
+ "content": base64.b64encode(file_to_copy).decode(),
890
+ "path": operation.path_in_repo,
891
+ "encoding": "base64",
892
+ },
893
+ }
894
+ elif file_to_copy.lfs:
895
+ yield {
896
+ "key": "lfsFile",
897
+ "value": {
898
+ "path": operation.path_in_repo,
899
+ "algo": "sha256",
900
+ "oid": file_to_copy.lfs.sha256,
901
+ },
902
+ }
903
+ else:
904
+ raise ValueError(
905
+ "Malformed files_to_copy (should be raw file content as bytes or RepoFile objects with LFS info)."
906
+ )
907
+ # 2.e. Never expected to happen
908
+ else:
909
+ raise ValueError(
910
+ f"Unknown operation to commit. Operation: {operation}. Upload mode:"
911
+ f" {getattr(operation, '_upload_mode', None)}"
912
+ )
913
+
914
+ if nb_ignored_files > 0:
915
+ logger.info(f"Skipped {nb_ignored_files} file(s) in commit (ignored by gitignore file).")
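
Because `_prepare_commit_payload` is a generator of dicts, streaming it as newline-delimited JSON is a one-liner per item. A minimal sketch (the items below are illustrative, not a real commit):

```python
import json

def to_ndjson(items):
    # One JSON document per line, as expected by the /commit endpoint.
    for item in items:
        yield (json.dumps(item) + "\n").encode("utf-8")

items = [
    {"key": "header", "value": {"summary": "Upload data", "description": ""}},
    {"key": "deletedFile", "value": {"path": "old.txt"}},
]
body = b"".join(to_ndjson(items))
print(body.decode())
```
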
.venv/lib/python3.13/site-packages/huggingface_hub/_commit_scheduler.py ADDED
@@ -0,0 +1,353 @@
1
+ import atexit
2
+ import logging
3
+ import os
4
+ import time
5
+ from concurrent.futures import Future
6
+ from dataclasses import dataclass
7
+ from io import SEEK_END, SEEK_SET, BytesIO
8
+ from pathlib import Path
9
+ from threading import Lock, Thread
10
+ from typing import Dict, List, Optional, Union
11
+
12
+ from .hf_api import DEFAULT_IGNORE_PATTERNS, CommitInfo, CommitOperationAdd, HfApi
13
+ from .utils import filter_repo_objects
14
+
15
+
16
+ logger = logging.getLogger(__name__)
17
+
18
+
19
+ @dataclass(frozen=True)
20
+ class _FileToUpload:
21
+ """Temporary dataclass to store info about files to upload. Not meant to be used directly."""
22
+
23
+ local_path: Path
24
+ path_in_repo: str
25
+ size_limit: int
26
+ last_modified: float
27
+
28
+
29
+ class CommitScheduler:
30
+ """
31
+ Scheduler to upload a local folder to the Hub at regular intervals (e.g. push to hub every 5 minutes).
32
+
33
+ The recommended way to use the scheduler is to use it as a context manager. This ensures that the scheduler is
34
+ properly stopped and the last commit is triggered when the script ends. The scheduler can also be stopped manually
35
+ with the `stop` method. Check out the [upload guide](https://huggingface.co/docs/huggingface_hub/guides/upload#scheduled-uploads)
36
+ to learn more about how to use it.
37
+
38
+ Args:
39
+ repo_id (`str`):
40
+ The id of the repo to commit to.
41
+ folder_path (`str` or `Path`):
42
+ Path to the local folder to upload regularly.
43
+ every (`int` or `float`, *optional*):
44
+ The number of minutes between each commit. Defaults to 5 minutes.
45
+ path_in_repo (`str`, *optional*):
46
+ Relative path of the directory in the repo, for example: `"checkpoints/"`. Defaults to the root folder
47
+ of the repository.
48
+ repo_type (`str`, *optional*):
49
+ The type of the repo to commit to. Defaults to `model`.
50
+ revision (`str`, *optional*):
51
+ The revision of the repo to commit to. Defaults to `main`.
52
+ private (`bool`, *optional*):
53
+ Whether to make the repo private. If `None` (default), the repo will be public unless the organization's default is private. This value is ignored if the repo already exists.
54
+ token (`str`, *optional*):
55
+ The token to use to commit to the repo. Defaults to the token saved on the machine.
56
+ allow_patterns (`List[str]` or `str`, *optional*):
57
+ If provided, only files matching at least one pattern are uploaded.
58
+ ignore_patterns (`List[str]` or `str`, *optional*):
59
+ If provided, files matching any of the patterns are not uploaded.
60
+ squash_history (`bool`, *optional*):
61
+ Whether to squash the history of the repo after each commit. Defaults to `False`. Squashing commits is
62
+ useful to avoid degraded performance on the repo when it grows too large.
63
+ hf_api (`HfApi`, *optional*):
64
+ The [`HfApi`] client to use to commit to the Hub. Can be set with custom settings (user agent, token,...).
65
+
66
+ Example:
67
+ ```py
68
+ >>> from pathlib import Path
69
+ >>> from huggingface_hub import CommitScheduler
70
+
71
+ # Scheduler uploads every 10 minutes
72
+ >>> csv_path = Path("watched_folder/data.csv")
73
+ >>> CommitScheduler(repo_id="test_scheduler", repo_type="dataset", folder_path=csv_path.parent, every=10)
74
+
75
+ >>> with csv_path.open("a") as f:
76
+ ... f.write("first line")
77
+
78
+ # Some time later (...)
79
+ >>> with csv_path.open("a") as f:
80
+ ... f.write("second line")
81
+ ```
82
+
83
+ Example using a context manager:
84
+ ```py
85
+ >>> from pathlib import Path
86
+ >>> from huggingface_hub import CommitScheduler
87
+
88
+ >>> with CommitScheduler(repo_id="test_scheduler", repo_type="dataset", folder_path="watched_folder", every=10) as scheduler:
89
+ ... csv_path = Path("watched_folder/data.csv")
90
+ ... with csv_path.open("a") as f:
91
+ ... f.write("first line")
92
+ ... (...)
93
+ ... with csv_path.open("a") as f:
94
+ ... f.write("second line")
95
+
96
+ # Scheduler is now stopped and the last commit has been triggered
97
+ ```
98
+ """
99
+
100
+ def __init__(
101
+ self,
102
+ *,
103
+ repo_id: str,
104
+ folder_path: Union[str, Path],
105
+ every: Union[int, float] = 5,
106
+ path_in_repo: Optional[str] = None,
107
+ repo_type: Optional[str] = None,
108
+ revision: Optional[str] = None,
109
+ private: Optional[bool] = None,
110
+ token: Optional[str] = None,
111
+ allow_patterns: Optional[Union[List[str], str]] = None,
112
+ ignore_patterns: Optional[Union[List[str], str]] = None,
113
+ squash_history: bool = False,
114
+ hf_api: Optional["HfApi"] = None,
115
+ ) -> None:
116
+ self.api = hf_api or HfApi(token=token)
117
+
118
+ # Folder
119
+ self.folder_path = Path(folder_path).expanduser().resolve()
120
+ self.path_in_repo = path_in_repo or ""
121
+ self.allow_patterns = allow_patterns
122
+
123
+ if ignore_patterns is None:
124
+ ignore_patterns = []
125
+ elif isinstance(ignore_patterns, str):
126
+ ignore_patterns = [ignore_patterns]
127
+ self.ignore_patterns = ignore_patterns + DEFAULT_IGNORE_PATTERNS
128
+
129
+ if self.folder_path.is_file():
130
+ raise ValueError(f"'folder_path' must be a directory, not a file: '{self.folder_path}'.")
131
+ self.folder_path.mkdir(parents=True, exist_ok=True)
132
+
133
+ # Repository
134
+ repo_url = self.api.create_repo(repo_id=repo_id, private=private, repo_type=repo_type, exist_ok=True)
135
+ self.repo_id = repo_url.repo_id
136
+ self.repo_type = repo_type
137
+ self.revision = revision
138
+ self.token = token
139
+
140
+ # Keep track of already uploaded files
141
+ self.last_uploaded: Dict[Path, float] = {} # key is local path, value is timestamp
142
+
143
+ # Scheduler
144
+ if not every > 0:
145
+ raise ValueError(f"'every' must be a positive number, not '{every}'.")
146
+ self.lock = Lock()
147
+ self.every = every
148
+ self.squash_history = squash_history
149
+
150
+ logger.info(f"Scheduled job to push '{self.folder_path}' to '{self.repo_id}' every {self.every} minutes.")
151
+ self._scheduler_thread = Thread(target=self._run_scheduler, daemon=True)
152
+ self._scheduler_thread.start()
153
+ atexit.register(self._push_to_hub)
154
+
155
+ self.__stopped = False
156
+
157
+ def stop(self) -> None:
158
+ """Stop the scheduler.
159
+
160
+ A stopped scheduler cannot be restarted. Mostly for testing purposes.
161
+ """
162
+ self.__stopped = True
163
+
164
+ def __enter__(self) -> "CommitScheduler":
165
+ return self
166
+
167
+ def __exit__(self, exc_type, exc_value, traceback) -> None:
168
+ # Upload last changes before exiting
169
+ self.trigger().result()
170
+ self.stop()
171
+ return
172
+
173
+ def _run_scheduler(self) -> None:
174
+ """Dumb thread waiting between each scheduled push to the Hub."""
175
+ while True:
176
+ self.last_future = self.trigger()
177
+ time.sleep(self.every * 60)
178
+ if self.__stopped:
179
+ break
180
+
181
+ def trigger(self) -> Future:
182
+ """Trigger a `push_to_hub` and return a future.
183
+
184
+ This method is automatically called every `every` minutes. You can also call it manually to trigger a commit
185
+ immediately, without waiting for the next scheduled commit.
186
+ """
187
+ return self.api.run_as_future(self._push_to_hub)
188
+
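
Since `trigger` returns a `concurrent.futures.Future`, a caller can force an immediate commit and block on its result. A hedged usage sketch (`my-user/test_scheduler` is a placeholder repo id):

```python
from huggingface_hub import CommitScheduler

with CommitScheduler(
    repo_id="my-user/test_scheduler",  # placeholder
    repo_type="dataset",
    folder_path="watched_folder",
    every=10,
) as scheduler:
    future = scheduler.trigger()   # force a commit now instead of waiting
    commit_info = future.result()  # blocks until the background push finishes
    print(commit_info)             # None if there was nothing to upload
```
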
189
+ def _push_to_hub(self) -> Optional[CommitInfo]:
190
+ if self.__stopped: # If stopped, already scheduled commits are ignored
191
+ return None
192
+
193
+ logger.info("(Background) scheduled commit triggered.")
194
+ try:
195
+ value = self.push_to_hub()
196
+ if self.squash_history:
197
+ logger.info("(Background) squashing repo history.")
198
+ self.api.super_squash_history(repo_id=self.repo_id, repo_type=self.repo_type, branch=self.revision)
199
+ return value
200
+ except Exception as e:
201
+ logger.error(f"Error while pushing to Hub: {e}") # Depending on the setup, error might be silenced
202
+ raise
203
+
204
+ def push_to_hub(self) -> Optional[CommitInfo]:
205
+ """
206
+ Push folder to the Hub and return the commit info.
207
+
208
+ <Tip warning={true}>
209
+
210
+ This method is not meant to be called directly. It is run in the background by the scheduler, respecting a
211
+ queue mechanism to avoid concurrent commits. Making a direct call to the method might lead to concurrency
212
+ issues.
213
+
214
+ </Tip>
215
+
216
+ The default behavior of `push_to_hub` is to assume an append-only folder. It lists all files in the folder and
217
+ uploads only changed files. If no changes are found, the method returns without committing anything. If you want
218
+ to change this behavior, you can inherit from [`CommitScheduler`] and override this method. This can be useful
219
+ for example to compress data together in a single file before committing. For more details and examples, check
220
+ out our [integration guide](https://huggingface.co/docs/huggingface_hub/main/en/guides/upload#scheduled-uploads).
221
+ """
222
+ # Check files to upload (with lock)
223
+ with self.lock:
224
+ logger.debug("Listing files to upload for scheduled commit.")
225
+
226
+ # List files from folder (taken from `_prepare_upload_folder_additions`)
227
+ relpath_to_abspath = {
228
+ path.relative_to(self.folder_path).as_posix(): path
229
+ for path in sorted(self.folder_path.glob("**/*")) # sorted to be deterministic
230
+ if path.is_file()
231
+ }
232
+ prefix = f"{self.path_in_repo.strip('/')}/" if self.path_in_repo else ""
233
+
234
+ # Filter with pattern + filter out unchanged files + retrieve current file size
235
+ files_to_upload: List[_FileToUpload] = []
236
+ for relpath in filter_repo_objects(
237
+ relpath_to_abspath.keys(), allow_patterns=self.allow_patterns, ignore_patterns=self.ignore_patterns
238
+ ):
239
+ local_path = relpath_to_abspath[relpath]
240
+ stat = local_path.stat()
241
+ if self.last_uploaded.get(local_path) is None or self.last_uploaded[local_path] != stat.st_mtime:
242
+ files_to_upload.append(
243
+ _FileToUpload(
244
+ local_path=local_path,
245
+ path_in_repo=prefix + relpath,
246
+ size_limit=stat.st_size,
247
+ last_modified=stat.st_mtime,
248
+ )
249
+ )
250
+
251
+ # Return if nothing to upload
252
+ if len(files_to_upload) == 0:
253
+ logger.debug("Dropping scheduled commit: no changed file to upload.")
254
+ return None
255
+
256
+ # Convert `_FileToUpload` to `CommitOperationAdd` (=> compute file shas + limit to file size)
257
+ logger.debug("Removing unchanged files since previous scheduled commit.")
258
+ add_operations = [
259
+ CommitOperationAdd(
260
+ # Cap the file to its current size, even if the user appends data to it while a scheduled commit is happening
261
+ path_or_fileobj=PartialFileIO(file_to_upload.local_path, size_limit=file_to_upload.size_limit),
262
+ path_in_repo=file_to_upload.path_in_repo,
263
+ )
264
+ for file_to_upload in files_to_upload
265
+ ]
266
+
267
+ # Upload files (append mode expected - no need for lock)
268
+ logger.debug("Uploading files for scheduled commit.")
269
+ commit_info = self.api.create_commit(
270
+ repo_id=self.repo_id,
271
+ repo_type=self.repo_type,
272
+ operations=add_operations,
273
+ commit_message="Scheduled Commit",
274
+ revision=self.revision,
275
+ )
276
+
277
+ # Successful commit: keep track of the latest "last_modified" for each file
278
+ for file in files_to_upload:
279
+ self.last_uploaded[file.local_path] = file.last_modified
280
+ return commit_info
281
+
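
As the docstring above suggests, the append-only behavior can be replaced by subclassing and overriding `push_to_hub`. A hedged sketch that archives the whole folder into a single zip before each commit (names are illustrative):

```python
import shutil
import tempfile
from pathlib import Path

from huggingface_hub import CommitScheduler

class ZipScheduler(CommitScheduler):
    def push_to_hub(self):
        # Hold the lock while reading the folder so files are not mutated mid-archive.
        with self.lock:
            with tempfile.TemporaryDirectory() as tmp:
                archive = shutil.make_archive(str(Path(tmp) / "data"), "zip", self.folder_path)
                return self.api.upload_file(
                    repo_id=self.repo_id,
                    repo_type=self.repo_type,
                    revision=self.revision,
                    path_or_fileobj=archive,
                    path_in_repo="data.zip",
                )
```
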
282
+
283
+ class PartialFileIO(BytesIO):
284
+ """A file-like object that reads only the first part of a file.
285
+
286
+ Useful to upload a file to the Hub when the user might still be appending data to it. Only the first part of the
287
+ file is uploaded (i.e. the part that was available when the filesystem was first scanned).
288
+
289
+ In practice, only used internally by the CommitScheduler to regularly push a folder to the Hub with minimal
290
+ disturbance for the user. The object is passed to `CommitOperationAdd`.
291
+
292
+ Only supports `read`, `tell` and `seek` methods.
293
+
294
+ Args:
295
+ file_path (`str` or `Path`):
296
+ Path to the file to read.
297
+ size_limit (`int`):
298
+ The maximum number of bytes to read from the file. If the file is larger than this, only the first part
299
+ will be read (and uploaded).
300
+ """
301
+
302
+ def __init__(self, file_path: Union[str, Path], size_limit: int) -> None:
303
+ self._file_path = Path(file_path)
304
+ self._file = self._file_path.open("rb")
305
+ self._size_limit = min(size_limit, os.fstat(self._file.fileno()).st_size)
306
+
307
+ def __del__(self) -> None:
308
+ self._file.close()
309
+ return super().__del__()
310
+
311
+ def __repr__(self) -> str:
312
+ return f"<PartialFileIO file_path={self._file_path} size_limit={self._size_limit}>"
313
+
314
+ def __len__(self) -> int:
315
+ return self._size_limit
316
+
317
+ def __getattribute__(self, name: str):
318
+ if name.startswith("_") or name in ("read", "tell", "seek"): # only 3 public methods supported
319
+ return super().__getattribute__(name)
320
+ raise NotImplementedError(f"PartialFileIO does not support '{name}'.")
321
+
322
+ def tell(self) -> int:
323
+ """Return the current file position."""
324
+ return self._file.tell()
325
+
326
+ def seek(self, __offset: int, __whence: int = SEEK_SET) -> int:
327
+ """Change the stream position to the given offset.
328
+
329
+ Behavior is the same as a regular file, except that the position is capped to the size limit.
330
+ """
331
+ if __whence == SEEK_END:
332
+ # SEEK_END => set from the truncated end
333
+ __offset = len(self) + __offset
334
+ __whence = SEEK_SET
335
+
336
+ pos = self._file.seek(__offset, __whence)
337
+ if pos > self._size_limit:
338
+ return self._file.seek(self._size_limit)
339
+ return pos
340
+
341
+ def read(self, __size: Optional[int] = -1) -> bytes:
342
+ """Read at most `__size` bytes from the file.
343
+
344
+ Behavior is the same as a regular file, except that it is capped to the size limit.
345
+ """
346
+ current = self._file.tell()
347
+ if __size is None or __size < 0:
348
+ # Read until file limit
349
+ truncated_size = self._size_limit - current
350
+ else:
351
+ # Read until file limit or __size
352
+ truncated_size = min(__size, self._size_limit - current)
353
+ return self._file.read(truncated_size)
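
A short usage sketch of the capping behavior (writes a throwaway `demo.txt`; the name is arbitrary):

```python
from io import SEEK_END
from pathlib import Path

tmp = Path("demo.txt")
tmp.write_text("hello world")  # 11 bytes on disk

f = PartialFileIO(tmp, size_limit=5)
print(len(f))               # 5  -- capped length
print(f.read())             # b'hello' -- reads never go past the limit
print(f.seek(0, SEEK_END))  # 5  -- SEEK_END is relative to the truncated end
```
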
.venv/lib/python3.13/site-packages/huggingface_hub/_inference_endpoints.py ADDED
@@ -0,0 +1,413 @@
1
+ import time
2
+ from dataclasses import dataclass, field
3
+ from datetime import datetime
4
+ from enum import Enum
5
+ from typing import TYPE_CHECKING, Dict, Optional, Union
6
+
7
+ from huggingface_hub.errors import InferenceEndpointError, InferenceEndpointTimeoutError
8
+
9
+ from .utils import get_session, logging, parse_datetime
10
+
11
+
12
+ if TYPE_CHECKING:
13
+ from .hf_api import HfApi
14
+ from .inference._client import InferenceClient
15
+ from .inference._generated._async_client import AsyncInferenceClient
16
+
17
+ logger = logging.get_logger(__name__)
18
+
19
+
20
+ class InferenceEndpointStatus(str, Enum):
21
+ PENDING = "pending"
22
+ INITIALIZING = "initializing"
23
+ UPDATING = "updating"
24
+ UPDATE_FAILED = "updateFailed"
25
+ RUNNING = "running"
26
+ PAUSED = "paused"
27
+ FAILED = "failed"
28
+ SCALED_TO_ZERO = "scaledToZero"
29
+
30
+
31
+ class InferenceEndpointType(str, Enum):
32
+ PUBLIC = "public"
33
+ PROTECTED = "protected"
34
+ PRIVATE = "private"
35
+
36
+
37
+ @dataclass
38
+ class InferenceEndpoint:
39
+ """
40
+ Contains information about a deployed Inference Endpoint.
41
+
42
+ Args:
43
+ name (`str`):
44
+ The unique name of the Inference Endpoint.
45
+ namespace (`str`):
46
+ The namespace where the Inference Endpoint is located.
47
+ repository (`str`):
48
+ The name of the model repository deployed on this Inference Endpoint.
49
+ status ([`InferenceEndpointStatus`]):
50
+ The current status of the Inference Endpoint.
51
+ url (`str`, *optional*):
52
+ The URL of the Inference Endpoint, if available. Only a deployed Inference Endpoint will have a URL.
53
+ framework (`str`):
54
+ The machine learning framework used for the model.
55
+ revision (`str`):
56
+ The specific model revision deployed on the Inference Endpoint.
57
+ task (`str`):
58
+ The task associated with the deployed model.
59
+ created_at (`datetime.datetime`):
60
+ The timestamp when the Inference Endpoint was created.
61
+ updated_at (`datetime.datetime`):
62
+ The timestamp of the last update of the Inference Endpoint.
63
+ type ([`InferenceEndpointType`]):
64
+ The type of the Inference Endpoint (public, protected, private).
65
+ raw (`Dict`):
66
+ The raw dictionary data returned from the API.
67
+ token (`str` or `bool`, *optional*):
68
+ Authentication token for the Inference Endpoint, if set when requesting the API. Will default to the
69
+ locally saved token if not provided. Pass `token=False` if you don't want to send your token to the server.
70
+
71
+ Example:
72
+ ```python
73
+ >>> from huggingface_hub import get_inference_endpoint
74
+ >>> endpoint = get_inference_endpoint("my-text-to-image")
75
+ >>> endpoint
76
+ InferenceEndpoint(name='my-text-to-image', ...)
77
+
78
+ # Get status
79
+ >>> endpoint.status
80
+ 'running'
81
+ >>> endpoint.url
82
+ 'https://my-text-to-image.region.vendor.endpoints.huggingface.cloud'
83
+
84
+ # Run inference
85
+ >>> endpoint.client.text_to_image(...)
86
+
87
+ # Pause endpoint to save $$$
88
+ >>> endpoint.pause()
89
+
90
+ # ...
91
+ # Resume and wait for deployment
92
+ >>> endpoint.resume()
93
+ >>> endpoint.wait()
94
+ >>> endpoint.client.text_to_image(...)
95
+ ```
96
+ """
97
+
98
+ # Field in __repr__
99
+ name: str = field(init=False)
100
+ namespace: str
101
+ repository: str = field(init=False)
102
+ status: InferenceEndpointStatus = field(init=False)
103
+ health_route: str = field(init=False)
104
+ url: Optional[str] = field(init=False)
105
+
106
+ # Other fields
107
+ framework: str = field(repr=False, init=False)
108
+ revision: str = field(repr=False, init=False)
109
+ task: str = field(repr=False, init=False)
110
+ created_at: datetime = field(repr=False, init=False)
111
+ updated_at: datetime = field(repr=False, init=False)
112
+ type: InferenceEndpointType = field(repr=False, init=False)
113
+
114
+ # Raw dict from the API
115
+ raw: Dict = field(repr=False)
116
+
117
+ # Internal fields
118
+ _token: Union[str, bool, None] = field(repr=False, compare=False)
119
+ _api: "HfApi" = field(repr=False, compare=False)
120
+
121
+ @classmethod
122
+ def from_raw(
123
+ cls, raw: Dict, namespace: str, token: Union[str, bool, None] = None, api: Optional["HfApi"] = None
124
+ ) -> "InferenceEndpoint":
125
+ """Initialize object from raw dictionary."""
126
+ if api is None:
127
+ from .hf_api import HfApi
128
+
129
+ api = HfApi()
130
+ if token is None:
131
+ token = api.token
132
+
133
+ # All other fields are populated in __post_init__
134
+ return cls(raw=raw, namespace=namespace, _token=token, _api=api)
135
+
136
+ def __post_init__(self) -> None:
137
+ """Populate fields from raw dictionary."""
138
+ self._populate_from_raw()
139
+
140
+ @property
141
+ def client(self) -> "InferenceClient":
142
+ """Returns a client to make predictions on this Inference Endpoint.
143
+
144
+ Returns:
145
+ [`InferenceClient`]: an inference client pointing to the deployed endpoint.
146
+
147
+ Raises:
148
+ [`InferenceEndpointError`]: If the Inference Endpoint is not yet deployed.
149
+ """
150
+ if self.url is None:
151
+ raise InferenceEndpointError(
152
+ "Cannot create a client for this Inference Endpoint as it is not yet deployed. "
153
+ "Please wait for the Inference Endpoint to be deployed using `endpoint.wait()` and try again."
154
+ )
155
+ from .inference._client import InferenceClient
156
+
157
+ return InferenceClient(
158
+ model=self.url,
159
+ token=self._token, # type: ignore[arg-type] # boolean token shouldn't be possible. In practice it's ok.
160
+ )
161
+
162
+ @property
163
+ def async_client(self) -> "AsyncInferenceClient":
164
+ """Returns a client to make predictions on this Inference Endpoint.
165
+
166
+ Returns:
167
+ [`AsyncInferenceClient`]: an asyncio-compatible inference client pointing to the deployed endpoint.
168
+
169
+ Raises:
170
+ [`InferenceEndpointError`]: If the Inference Endpoint is not yet deployed.
171
+ """
172
+ if self.url is None:
173
+ raise InferenceEndpointError(
174
+ "Cannot create a client for this Inference Endpoint as it is not yet deployed. "
175
+ "Please wait for the Inference Endpoint to be deployed using `endpoint.wait()` and try again."
176
+ )
177
+ from .inference._generated._async_client import AsyncInferenceClient
178
+
179
+ return AsyncInferenceClient(
180
+ model=self.url,
181
+ token=self._token, # type: ignore[arg-type] # boolean token shouldn't be possible. In practice it's ok.
182
+ )
183
+
184
+ def wait(self, timeout: Optional[int] = None, refresh_every: int = 5) -> "InferenceEndpoint":
185
+ """Wait for the Inference Endpoint to be deployed.
186
+
187
+ Information from the server will be fetched every `refresh_every` seconds. If the Inference Endpoint is not deployed after `timeout`
188
+ seconds, a [`InferenceEndpointTimeoutError`] will be raised. The [`InferenceEndpoint`] will be mutated in place with the latest
189
+ data.
190
+
191
+ Args:
192
+ timeout (`int`, *optional*):
193
+ The maximum time to wait for the Inference Endpoint to be deployed, in seconds. If `None`, will wait
194
+ indefinitely.
195
+ refresh_every (`int`, *optional*):
196
+ The time to wait between each fetch of the Inference Endpoint status, in seconds. Defaults to 5s.
197
+
198
+ Returns:
199
+ [`InferenceEndpoint`]: the same Inference Endpoint, mutated in place with the latest data.
200
+
201
+ Raises:
202
+ [`InferenceEndpointError`]
203
+ If the Inference Endpoint ended up in a failed state.
204
+ [`InferenceEndpointTimeoutError`]
205
+ If the Inference Endpoint is not deployed after `timeout` seconds.
206
+ """
207
+ if timeout is not None and timeout < 0:
208
+ raise ValueError("`timeout` cannot be negative.")
209
+ if refresh_every <= 0:
210
+ raise ValueError("`refresh_every` must be positive.")
211
+
212
+ start = time.time()
213
+ while True:
214
+ if self.status == InferenceEndpointStatus.FAILED:
215
+ raise InferenceEndpointError(
216
+ f"Inference Endpoint {self.name} failed to deploy. Please check the logs for more information."
217
+ )
218
+ if self.status == InferenceEndpointStatus.UPDATE_FAILED:
219
+ raise InferenceEndpointError(
220
+ f"Inference Endpoint {self.name} failed to update. Please check the logs for more information."
221
+ )
222
+ if self.status == InferenceEndpointStatus.RUNNING and self.url is not None:
223
+ # Verify the endpoint is actually reachable
224
+ _health_url = f"{self.url.rstrip('/')}/{self.health_route.lstrip('/')}"
225
+ response = get_session().get(_health_url, headers=self._api._build_hf_headers(token=self._token))
226
+ if response.status_code == 200:
227
+ logger.info("Inference Endpoint is ready to be used.")
228
+ return self
229
+
230
+ if timeout is not None:
231
+ if time.time() - start > timeout:
232
+ raise InferenceEndpointTimeoutError("Timeout while waiting for Inference Endpoint to be deployed.")
233
+ logger.info(f"Inference Endpoint is not deployed yet ({self.status}). Waiting {refresh_every}s...")
234
+ time.sleep(refresh_every)
235
+ self.fetch()
236
+
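
Typical usage of the polling loop above, as a hedged sketch (`my-endpoint` is a placeholder name):

```python
from huggingface_hub import get_inference_endpoint

endpoint = get_inference_endpoint("my-endpoint")  # placeholder name
endpoint.resume()
endpoint.wait(timeout=300, refresh_every=10)  # poll every 10s, give up after 5 min
print(endpoint.status, endpoint.url)
```
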
237
+ def fetch(self) -> "InferenceEndpoint":
238
+ """Fetch latest information about the Inference Endpoint.
239
+
240
+ Returns:
241
+ [`InferenceEndpoint`]: the same Inference Endpoint, mutated in place with the latest data.
242
+ """
243
+ obj = self._api.get_inference_endpoint(name=self.name, namespace=self.namespace, token=self._token) # type: ignore [arg-type]
244
+ self.raw = obj.raw
245
+ self._populate_from_raw()
246
+ return self
247
+
248
+ def update(
249
+ self,
250
+ *,
251
+ # Compute update
252
+ accelerator: Optional[str] = None,
253
+ instance_size: Optional[str] = None,
254
+ instance_type: Optional[str] = None,
255
+ min_replica: Optional[int] = None,
256
+ max_replica: Optional[int] = None,
257
+ scale_to_zero_timeout: Optional[int] = None,
258
+ # Model update
259
+ repository: Optional[str] = None,
260
+ framework: Optional[str] = None,
261
+ revision: Optional[str] = None,
262
+ task: Optional[str] = None,
263
+ custom_image: Optional[Dict] = None,
264
+ secrets: Optional[Dict[str, str]] = None,
265
+ ) -> "InferenceEndpoint":
266
+ """Update the Inference Endpoint.
267
+
268
+ This method allows the update of either the compute configuration, the deployed model, or both. All arguments are
269
+ optional but at least one must be provided.
270
+
271
+ This is an alias for [`HfApi.update_inference_endpoint`]. The current object is mutated in place with the
272
+ latest data from the server.
273
+
274
+ Args:
275
+ accelerator (`str`, *optional*):
276
+ The hardware accelerator to be used for inference (e.g. `"cpu"`).
277
+ instance_size (`str`, *optional*):
278
+ The size or type of the instance to be used for hosting the model (e.g. `"x4"`).
279
+ instance_type (`str`, *optional*):
280
+ The cloud instance type where the Inference Endpoint will be deployed (e.g. `"intel-icl"`).
281
+ min_replica (`int`, *optional*):
282
+ The minimum number of replicas (instances) to keep running for the Inference Endpoint.
283
+ max_replica (`int`, *optional*):
284
+ The maximum number of replicas (instances) to scale to for the Inference Endpoint.
285
+ scale_to_zero_timeout (`int`, *optional*):
286
+ The duration in minutes before an inactive endpoint is scaled to zero.
287
+
288
+ repository (`str`, *optional*):
289
+ The name of the model repository associated with the Inference Endpoint (e.g. `"gpt2"`).
290
+ framework (`str`, *optional*):
291
+ The machine learning framework used for the model (e.g. `"custom"`).
292
+ revision (`str`, *optional*):
293
+ The specific model revision to deploy on the Inference Endpoint (e.g. `"6c0e6080953db56375760c0471a8c5f2929baf11"`).
294
+ task (`str`, *optional*):
295
+ The task on which to deploy the model (e.g. `"text-classification"`).
296
+ custom_image (`Dict`, *optional*):
297
+ A custom Docker image to use for the Inference Endpoint. This is useful if you want to deploy an
298
+ Inference Endpoint running on the `text-generation-inference` (TGI) framework (see examples).
299
+ secrets (`Dict[str, str]`, *optional*):
300
+ Secret values to inject in the container environment.
301
+ Returns:
302
+ [`InferenceEndpoint`]: the same Inference Endpoint, mutated in place with the latest data.
303
+ """
304
+ # Make API call
305
+ obj = self._api.update_inference_endpoint(
306
+ name=self.name,
307
+ namespace=self.namespace,
308
+ accelerator=accelerator,
309
+ instance_size=instance_size,
310
+ instance_type=instance_type,
311
+ min_replica=min_replica,
312
+ max_replica=max_replica,
313
+ scale_to_zero_timeout=scale_to_zero_timeout,
314
+ repository=repository,
315
+ framework=framework,
316
+ revision=revision,
317
+ task=task,
318
+ custom_image=custom_image,
319
+ secrets=secrets,
320
+ token=self._token, # type: ignore [arg-type]
321
+ )
322
+
323
+ # Mutate current object
324
+ self.raw = obj.raw
325
+ self._populate_from_raw()
326
+ return self
327
+
328
+ def pause(self) -> "InferenceEndpoint":
329
+ """Pause the Inference Endpoint.
330
+
331
+ A paused Inference Endpoint will not be charged. It can be resumed at any time using [`InferenceEndpoint.resume`].
332
+ This is different from scaling the Inference Endpoint to zero with [`InferenceEndpoint.scale_to_zero`]: a
334
+ scaled-to-zero endpoint is automatically restarted when a request is made to it.
334
+
335
+ This is an alias for [`HfApi.pause_inference_endpoint`]. The current object is mutated in place with the
336
+ latest data from the server.
337
+
338
+ Returns:
339
+ [`InferenceEndpoint`]: the same Inference Endpoint, mutated in place with the latest data.
340
+ """
341
+ obj = self._api.pause_inference_endpoint(name=self.name, namespace=self.namespace, token=self._token) # type: ignore [arg-type]
342
+ self.raw = obj.raw
343
+ self._populate_from_raw()
344
+ return self
345
+
346
+ def resume(self, running_ok: bool = True) -> "InferenceEndpoint":
347
+ """Resume the Inference Endpoint.
348
+
349
+ This is an alias for [`HfApi.resume_inference_endpoint`]. The current object is mutated in place with the
350
+ latest data from the server.
351
+
352
+ Args:
353
+ running_ok (`bool`, *optional*):
354
+ If `True`, the method will not raise an error if the Inference Endpoint is already running. Defaults to
355
+ `True`.
356
+
357
+ Returns:
358
+ [`InferenceEndpoint`]: the same Inference Endpoint, mutated in place with the latest data.
359
+ """
360
+ obj = self._api.resume_inference_endpoint(
361
+ name=self.name, namespace=self.namespace, running_ok=running_ok, token=self._token
362
+ ) # type: ignore [arg-type]
363
+ self.raw = obj.raw
364
+ self._populate_from_raw()
365
+ return self
366
+
367
+ def scale_to_zero(self) -> "InferenceEndpoint":
368
+ """Scale Inference Endpoint to zero.
369
+
370
+ An Inference Endpoint scaled to zero will not be charged. It will be resumed on the next request to it, with a
371
+ cold start delay. This is different from pausing the Inference Endpoint with [`InferenceEndpoint.pause`], which
372
+ would require a manual resume with [`InferenceEndpoint.resume`].
373
+
374
+ This is an alias for [`HfApi.scale_to_zero_inference_endpoint`]. The current object is mutated in place with the
375
+ latest data from the server.
376
+
377
+ Returns:
378
+ [`InferenceEndpoint`]: the same Inference Endpoint, mutated in place with the latest data.
379
+ """
380
+ obj = self._api.scale_to_zero_inference_endpoint(name=self.name, namespace=self.namespace, token=self._token) # type: ignore [arg-type]
381
+ self.raw = obj.raw
382
+ self._populate_from_raw()
383
+ return self
384
+
385
+ def delete(self) -> None:
386
+ """Delete the Inference Endpoint.
387
+
388
+ This operation is not reversible. If you don't want to be charged for an Inference Endpoint, it is preferable
389
+ to pause it with [`InferenceEndpoint.pause`] or scale it to zero with [`InferenceEndpoint.scale_to_zero`].
390
+
391
+ This is an alias for [`HfApi.delete_inference_endpoint`].
392
+ """
393
+ self._api.delete_inference_endpoint(name=self.name, namespace=self.namespace, token=self._token) # type: ignore [arg-type]
394
+
395
+ def _populate_from_raw(self) -> None:
396
+ """Populate fields from raw dictionary.
397
+
398
+ Called in __post_init__ + each time the Inference Endpoint is updated.
399
+ """
400
+ # Repr fields
401
+ self.name = self.raw["name"]
402
+ self.repository = self.raw["model"]["repository"]
403
+ self.status = self.raw["status"]["state"]
404
+ self.url = self.raw["status"].get("url")
405
+ self.health_route = self.raw["healthRoute"]
406
+
407
+ # Other fields
408
+ self.framework = self.raw["model"]["framework"]
409
+ self.revision = self.raw["model"]["revision"]
410
+ self.task = self.raw["model"]["task"]
411
+ self.created_at = parse_datetime(self.raw["status"]["createdAt"])
412
+ self.updated_at = parse_datetime(self.raw["status"]["updatedAt"])
413
+ self.type = self.raw["type"]
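
For reference, a hedged sketch of the minimal raw payload shape that `_populate_from_raw` consumes (all values are illustrative, not a real endpoint):

```python
raw = {
    "name": "my-endpoint",
    "type": "protected",
    "healthRoute": "/health",
    "model": {
        "repository": "gpt2",
        "framework": "pytorch",
        "revision": "main",
        "task": "text-generation",
    },
    "status": {
        "state": "pending",
        "url": None,
        "createdAt": "2024-01-01T00:00:00.000Z",
        "updatedAt": "2024-01-01T00:00:00.000Z",
    },
}
endpoint = InferenceEndpoint.from_raw(raw, namespace="my-org")
print(endpoint.name, endpoint.status)  # my-endpoint pending
```
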
.venv/lib/python3.13/site-packages/huggingface_hub/_local_folder.py ADDED
@@ -0,0 +1,441 @@
1
+ # coding=utf-8
2
+ # Copyright 2024-present, the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Contains utilities to handle the `.cache/huggingface` folder in local directories.
16
+
17
+ First discussed in https://github.com/huggingface/huggingface_hub/issues/1738 to store
18
+ download metadata when downloading files from the hub to a local directory (without
19
+ using the cache).
20
+
21
+ ./.cache/huggingface folder structure:
22
+ [4.0K]  data
23
+ ├── [4.0K]  .cache
24
+ │   └── [4.0K]  huggingface
25
+ │       └── [4.0K]  download
26
+ │           ├── [  16]  file.parquet.metadata
27
+ │           ├── [  16]  file.txt.metadata
28
+ │           └── [4.0K]  folder
29
+ │               └── [  16]  file.parquet.metadata
30
+
31
+ ├── [6.5G]  file.parquet
32
+ ├── [1.5K]  file.txt
33
+ └── [4.0K]  folder
34
+     └── [  16]  file.parquet
35
+
36
+
37
+ Download metadata file structure:
38
+ ```
39
+ # file.txt.metadata
40
+ 11c5a3d5811f50298f278a704980280950aedb10
41
+ a16a55fda99d2f2e7b69cce5cf93ff4ad3049930
42
+ 1712656091.123
43
+
44
+ # file.parquet.metadata
45
+ 11c5a3d5811f50298f278a704980280950aedb10
46
+ 7c5d3f4b8b76583b422fcb9189ad6c89d5d97a094541ce8932dce3ecabde1421
47
+ 1712656091.123
48
+
49
+ ```
50
+ """
51
+
52
+ import base64
53
+ import hashlib
54
+ import logging
55
+ import os
56
+ import time
57
+ from dataclasses import dataclass
58
+ from pathlib import Path
59
+ from typing import Optional
60
+
61
+ from .utils import WeakFileLock
62
+
63
+
64
+ logger = logging.getLogger(__name__)
65
+
66
+
67
+ @dataclass
68
+ class LocalDownloadFilePaths:
69
+ """
70
+ Paths to the files related to a download process in a local dir.
71
+
72
+ Returned by [`get_local_download_paths`].
73
+
74
+ Attributes:
75
+ file_path (`Path`):
76
+ Path where the file will be saved.
77
+ lock_path (`Path`):
78
+ Path to the lock file used to ensure atomicity when reading/writing metadata.
79
+ metadata_path (`Path`):
80
+ Path to the metadata file.
81
+ """
82
+
83
+ file_path: Path
84
+ lock_path: Path
85
+ metadata_path: Path
86
+
87
+ def incomplete_path(self, etag: str) -> Path:
88
+ """Return the path where a file will be temporarily downloaded before being moved to `file_path`."""
89
+ return self.metadata_path.parent / f"{_short_hash(self.metadata_path.name)}.{etag}.incomplete"
90
+
91
+
92
+ @dataclass(frozen=True)
93
+ class LocalUploadFilePaths:
94
+ """
95
+ Paths to the files related to an upload process in a local dir.
96
+
97
+ Returned by [`get_local_upload_paths`].
98
+
99
+ Attributes:
100
+ path_in_repo (`str`):
101
+ Path of the file in the repo.
102
+ file_path (`Path`):
103
+ Path where the file will be saved.
104
+ lock_path (`Path`):
105
+ Path to the lock file used to ensure atomicity when reading/writing metadata.
106
+ metadata_path (`Path`):
107
+ Path to the metadata file.
108
+ """
109
+
110
+ path_in_repo: str
111
+ file_path: Path
112
+ lock_path: Path
113
+ metadata_path: Path
114
+
115
+
116
+ @dataclass
117
+ class LocalDownloadFileMetadata:
118
+ """
119
+ Metadata about a file in the local directory related to a download process.
120
+
121
+ Attributes:
122
+ filename (`str`):
123
+ Path of the file in the repo.
124
+ commit_hash (`str`):
125
+ Commit hash of the file in the repo.
126
+ etag (`str`):
127
+ ETag of the file in the repo. Used to check if the file has changed.
128
+ For LFS files, this is the sha256 of the file. For regular files, it corresponds to the git hash.
129
+ timestamp (`float`):
130
+ Unix timestamp of when the metadata was saved, i.e. when the metadata was accurate.
131
+ """
132
+
133
+ filename: str
134
+ commit_hash: str
135
+ etag: str
136
+ timestamp: float
137
+
138
+
139
+ @dataclass
140
+ class LocalUploadFileMetadata:
141
+ """
142
+ Metadata about a file in the local directory related to an upload process.
143
+ """
144
+
145
+ size: int
146
+
147
+ # Default values correspond to "we don't know yet"
148
+ timestamp: Optional[float] = None
149
+ should_ignore: Optional[bool] = None
150
+ sha256: Optional[str] = None
151
+ upload_mode: Optional[str] = None
152
+ remote_oid: Optional[str] = None
153
+ is_uploaded: bool = False
154
+ is_committed: bool = False
155
+
156
+ def save(self, paths: LocalUploadFilePaths) -> None:
157
+ """Save the metadata to disk."""
158
+ with WeakFileLock(paths.lock_path):
159
+ with paths.metadata_path.open("w") as f:
160
+ new_timestamp = time.time()
161
+ f.write(str(new_timestamp) + "\n")
162
+
163
+ f.write(str(self.size)) # never None
164
+ f.write("\n")
165
+
166
+ if self.should_ignore is not None:
167
+ f.write(str(int(self.should_ignore)))
168
+ f.write("\n")
169
+
170
+ if self.sha256 is not None:
171
+ f.write(self.sha256)
172
+ f.write("\n")
173
+
174
+ if self.upload_mode is not None:
175
+ f.write(self.upload_mode)
176
+ f.write("\n")
177
+
178
+ if self.remote_oid is not None:
179
+ f.write(self.remote_oid)
180
+ f.write("\n")
181
+
182
+ f.write(str(int(self.is_uploaded)) + "\n")
183
+ f.write(str(int(self.is_committed)) + "\n")
184
+
185
+ self.timestamp = new_timestamp
186
+
187
+
188
+ def get_local_download_paths(local_dir: Path, filename: str) -> LocalDownloadFilePaths:
189
+ """Compute paths to the files related to a download process.
190
+
191
+ Folders containing the paths are all guaranteed to exist.
192
+
193
+ Args:
194
+ local_dir (`Path`):
195
+ Path to the local directory in which files are downloaded.
196
+ filename (`str`):
197
+ Path of the file in the repo.
198
+
199
+ Return:
200
+ [`LocalDownloadFilePaths`]: the paths to the files (file_path, lock_path, metadata_path, incomplete_path).
201
+ """
202
+ # filename is the path in the Hub repository (separated by '/')
203
+ # make sure to have a cross platform transcription
204
+ sanitized_filename = os.path.join(*filename.split("/"))
205
+ if os.name == "nt":
206
+ if sanitized_filename.startswith("..\\") or "\\..\\" in sanitized_filename:
207
+ raise ValueError(
208
+ f"Invalid filename: cannot handle filename '{sanitized_filename}' on Windows. Please ask the repository"
209
+ " owner to rename this file."
210
+ )
211
+ file_path = local_dir / sanitized_filename
212
+ metadata_path = _huggingface_dir(local_dir) / "download" / f"{sanitized_filename}.metadata"
213
+ lock_path = metadata_path.with_suffix(".lock")
214
+
215
+ # Some Windows versions do not allow for paths longer than 255 characters.
216
+ # In this case, we must specify it as an extended path by using the "\\?\" prefix
217
+ if os.name == "nt":
218
+ if not str(local_dir).startswith("\\\\?\\") and len(os.path.abspath(lock_path)) > 255:
219
+ file_path = Path("\\\\?\\" + os.path.abspath(file_path))
220
+ lock_path = Path("\\\\?\\" + os.path.abspath(lock_path))
221
+ metadata_path = Path("\\\\?\\" + os.path.abspath(metadata_path))
222
+
223
+ file_path.parent.mkdir(parents=True, exist_ok=True)
224
+ metadata_path.parent.mkdir(parents=True, exist_ok=True)
225
+ return LocalDownloadFilePaths(file_path=file_path, lock_path=lock_path, metadata_path=metadata_path)
226
+
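
A usage sketch of the path computation (`./data` is an arbitrary local directory; the side-car folders are created on the fly):

```python
from pathlib import Path

paths = get_local_download_paths(Path("./data"), "folder/file.parquet")
print(paths.file_path)      # data/folder/file.parquet
print(paths.metadata_path)  # data/.cache/huggingface/download/folder/file.parquet.metadata
print(paths.lock_path)      # same as metadata_path but with a .lock suffix
print(paths.incomplete_path(etag="abc123"))  # hashed temporary download path
```
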
227
+
228
+ def get_local_upload_paths(local_dir: Path, filename: str) -> LocalUploadFilePaths:
229
+ """Compute paths to the files related to an upload process.
230
+
231
+ Folders containing the paths are all guaranteed to exist.
232
+
233
+ Args:
234
+ local_dir (`Path`):
235
+ Path to the local directory that is uploaded.
236
+ filename (`str`):
237
+ Path of the file in the repo.
238
+
239
+ Return:
240
+ [`LocalUploadFilePaths`]: the paths to the files (file_path, lock_path, metadata_path).
241
+ """
242
+ # filename is the path in the Hub repository (separated by '/')
243
+ # make sure to have a cross platform transcription
244
+ sanitized_filename = os.path.join(*filename.split("/"))
245
+ if os.name == "nt":
246
+ if sanitized_filename.startswith("..\\") or "\\..\\" in sanitized_filename:
247
+ raise ValueError(
248
+ f"Invalid filename: cannot handle filename '{sanitized_filename}' on Windows. Please ask the repository"
249
+ " owner to rename this file."
250
+ )
251
+ file_path = local_dir / sanitized_filename
252
+ metadata_path = _huggingface_dir(local_dir) / "upload" / f"{sanitized_filename}.metadata"
253
+ lock_path = metadata_path.with_suffix(".lock")
254
+
255
+ # Some Windows versions do not allow for paths longer than 255 characters.
256
+ # In this case, we must specify it as an extended path by using the "\\?\" prefix
257
+ if os.name == "nt":
258
+ if not str(local_dir).startswith("\\\\?\\") and len(os.path.abspath(lock_path)) > 255:
259
+ file_path = Path("\\\\?\\" + os.path.abspath(file_path))
260
+ lock_path = Path("\\\\?\\" + os.path.abspath(lock_path))
261
+ metadata_path = Path("\\\\?\\" + os.path.abspath(metadata_path))
262
+
263
+ file_path.parent.mkdir(parents=True, exist_ok=True)
264
+ metadata_path.parent.mkdir(parents=True, exist_ok=True)
265
+ return LocalUploadFilePaths(
266
+ path_in_repo=filename, file_path=file_path, lock_path=lock_path, metadata_path=metadata_path
267
+ )
268
+
269
+
270
+ def read_download_metadata(local_dir: Path, filename: str) -> Optional[LocalDownloadFileMetadata]:
271
+ """Read metadata about a file in the local directory related to a download process.
272
+
273
+ Args:
274
+ local_dir (`Path`):
275
+ Path to the local directory in which files are downloaded.
276
+ filename (`str`):
277
+ Path of the file in the repo.
278
+
279
+ Return:
280
+ `[LocalDownloadFileMetadata]` or `None`: the metadata if it exists, `None` otherwise.
281
+ """
282
+ paths = get_local_download_paths(local_dir, filename)
283
+ with WeakFileLock(paths.lock_path):
284
+ if paths.metadata_path.exists():
285
+ try:
286
+ with paths.metadata_path.open() as f:
287
+ commit_hash = f.readline().strip()
288
+ etag = f.readline().strip()
289
+ timestamp = float(f.readline().strip())
290
+ metadata = LocalDownloadFileMetadata(
291
+ filename=filename,
292
+ commit_hash=commit_hash,
293
+ etag=etag,
294
+ timestamp=timestamp,
295
+ )
296
+ except Exception as e:
297
+ # remove the metadata file if it is corrupted / not the right format
298
+ logger.warning(
299
+ f"Invalid metadata file {paths.metadata_path}: {e}. Removing it from disk and continuing."
300
+ )
301
+ try:
302
+ paths.metadata_path.unlink()
303
+ except Exception as e:
304
+ logger.warning(f"Could not remove corrupted metadata file {paths.metadata_path}: {e}")
305
+
306
+ try:
307
+ # check if the file exists and hasn't been modified since the metadata was saved
308
+ stat = paths.file_path.stat()
309
+ if (
310
+ stat.st_mtime - 1 <= metadata.timestamp
311
+ ): # allow 1s difference as stat.st_mtime might not be precise
312
+ return metadata
313
+ logger.info(f"Ignored metadata for '{filename}' (outdated). Will re-compute hash.")
314
+ except FileNotFoundError:
315
+ # file does not exist => metadata is outdated
316
+ return None
317
+ return None
318
+
319
+
320
+ def read_upload_metadata(local_dir: Path, filename: str) -> LocalUploadFileMetadata:
321
+ """Read metadata about a file in the local directory related to an upload process.
322
+
323
+ TODO: factorize logic with `read_download_metadata`.
324
+
325
+ Args:
326
+ local_dir (`Path`):
327
+ Path to the local directory in which files are downloaded.
328
+ filename (`str`):
329
+ Path of the file in the repo.
330
+
331
+ Return:
332
+ [`LocalUploadFileMetadata`]: the stored metadata if it exists and is up to date, otherwise a fresh metadata object where only the file size is known.
333
+ """
334
+ paths = get_local_upload_paths(local_dir, filename)
335
+ with WeakFileLock(paths.lock_path):
336
+ if paths.metadata_path.exists():
337
+ try:
338
+ with paths.metadata_path.open() as f:
339
+ timestamp = float(f.readline().strip())
340
+
341
+ size = int(f.readline().strip()) # never None
342
+
343
+ _should_ignore = f.readline().strip()
344
+ should_ignore = None if _should_ignore == "" else bool(int(_should_ignore))
345
+
346
+ _sha256 = f.readline().strip()
347
+ sha256 = None if _sha256 == "" else _sha256
348
+
349
+ _upload_mode = f.readline().strip()
350
+ upload_mode = None if _upload_mode == "" else _upload_mode
351
+ if upload_mode not in (None, "regular", "lfs"):
352
+ raise ValueError(f"Invalid upload mode in metadata {paths.path_in_repo}: {upload_mode}")
353
+
354
+ _remote_oid = f.readline().strip()
355
+ remote_oid = None if _remote_oid == "" else _remote_oid
356
+
357
+ is_uploaded = bool(int(f.readline().strip()))
358
+ is_committed = bool(int(f.readline().strip()))
359
+
360
+ metadata = LocalUploadFileMetadata(
361
+ timestamp=timestamp,
362
+ size=size,
363
+ should_ignore=should_ignore,
364
+ sha256=sha256,
365
+ upload_mode=upload_mode,
366
+ remote_oid=remote_oid,
367
+ is_uploaded=is_uploaded,
368
+ is_committed=is_committed,
369
+ )
370
+ except Exception as e:
371
+ # remove the metadata file if it is corrupted / not the right format
372
+ logger.warning(
373
+ f"Invalid metadata file {paths.metadata_path}: {e}. Removing it from disk and continuing."
374
+ )
375
+ try:
376
+ paths.metadata_path.unlink()
377
+ except Exception as e:
378
+ logger.warning(f"Could not remove corrupted metadata file {paths.metadata_path}: {e}")
379
+
380
+ # TODO: can we do better?
381
+ if (
382
+ metadata.timestamp is not None
383
+ and metadata.is_uploaded # file was uploaded
384
+ and not metadata.is_committed # but not committed
385
+ and time.time() - metadata.timestamp > 20 * 3600 # and it's been more than 20 hours
386
+ ): # => we consider it as garbage-collected by S3
387
+ metadata.is_uploaded = False
388
+
389
+ # check if the file exists and hasn't been modified since the metadata was saved
390
+ try:
391
+ if metadata.timestamp is not None and paths.file_path.stat().st_mtime <= metadata.timestamp:
392
+ return metadata
393
+ logger.info(f"Ignored metadata for '{filename}' (outdated). Will re-compute hash.")
394
+ except FileNotFoundError:
395
+ # file does not exist => metadata is outdated
396
+ pass
397
+
398
+ # empty metadata => we don't know anything except its size
399
+ return LocalUploadFileMetadata(size=paths.file_path.stat().st_size)
400
+
401
+
402
+ def write_download_metadata(local_dir: Path, filename: str, commit_hash: str, etag: str) -> None:
403
+ """Write metadata about a file in the local directory related to a download process.
404
+
405
+ Args:
406
+ local_dir (`Path`):
407
+ Path to the local directory in which files are downloaded.
408
+ """
409
+ paths = get_local_download_paths(local_dir, filename)
410
+ with WeakFileLock(paths.lock_path):
411
+ with paths.metadata_path.open("w") as f:
412
+ f.write(f"{commit_hash}\n{etag}\n{time.time()}\n")
413
+
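
A minimal round-trip of the 3-line metadata format written above, reusing the illustrative hash values from the module docstring (`./data` is arbitrary):

```python
from pathlib import Path

local_dir = Path("./data")
local_dir.mkdir(exist_ok=True)
(local_dir / "file.txt").write_text("hello")  # the tracked file itself

write_download_metadata(
    local_dir,
    filename="file.txt",
    commit_hash="11c5a3d5811f50298f278a704980280950aedb10",  # illustrative
    etag="a16a55fda99d2f2e7b69cce5cf93ff4ad3049930",  # illustrative
)
meta = read_download_metadata(local_dir, "file.txt")
print(meta.commit_hash, meta.etag)  # metadata is fresh: file unchanged since write
```
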
414
+
415
+ def _huggingface_dir(local_dir: Path) -> Path:
416
+ """Return the path to the `.cache/huggingface` directory in a local directory."""
417
+ # Note: the .gitignore creation below is guarded by an existence check, so calling this multiple times is safe.
418
+ path = local_dir / ".cache" / "huggingface"
419
+ path.mkdir(exist_ok=True, parents=True)
420
+
421
+ # Create a .gitignore file in the .cache/huggingface directory if it doesn't exist
422
+ # Should be thread-safe enough like this.
423
+ gitignore = path / ".gitignore"
424
+ gitignore_lock = path / ".gitignore.lock"
425
+ if not gitignore.exists():
426
+ try:
427
+ with WeakFileLock(gitignore_lock, timeout=0.1):
428
+ gitignore.write_text("*")
429
+ except IndexError:
430
+ pass
431
+ except OSError: # TimeoutError, FileNotFoundError, PermissionError, etc.
432
+ pass
433
+ try:
434
+ gitignore_lock.unlink()
435
+ except OSError:
436
+ pass
437
+ return path
438
+
439
+
440
+ def _short_hash(filename: str) -> str:
441
+ return base64.urlsafe_b64encode(hashlib.sha1(filename.encode()).digest()).decode()
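For reference, `write_download_metadata` above serializes exactly three newline-separated fields: commit hash, etag, and a write timestamp. A minimal sketch of parsing such a sidecar file back, assuming that format; the helper name `read_download_metadata_raw` is hypothetical and not part of the library:

```py
from pathlib import Path
from typing import Optional, Tuple


def read_download_metadata_raw(metadata_path: Path) -> Optional[Tuple[str, str, float]]:
    """Hypothetical illustration: parse a file written by `write_download_metadata`.

    Returns (commit_hash, etag, timestamp), or None if the file is missing or
    corrupted, mirroring how `read_upload_metadata` above treats unparsable
    metadata as absent.
    """
    try:
        commit_hash, etag, timestamp = metadata_path.read_text().splitlines()[:3]
        return commit_hash, etag, float(timestamp)
    except (OSError, ValueError):
        return None
```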
.venv/lib/python3.13/site-packages/huggingface_hub/_login.py ADDED
@@ -0,0 +1,520 @@
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Contains methods to log in to the Hub."""
+
+ import os
+ import subprocess
+ from getpass import getpass
+ from pathlib import Path
+ from typing import Optional
+
+ from . import constants
+ from .commands._cli_utils import ANSI
+ from .utils import (
+     capture_output,
+     get_token,
+     is_google_colab,
+     is_notebook,
+     list_credential_helpers,
+     logging,
+     run_subprocess,
+     set_git_credential,
+     unset_git_credential,
+ )
+ from .utils._auth import (
+     _get_token_by_name,
+     _get_token_from_environment,
+     _get_token_from_file,
+     _get_token_from_google_colab,
+     _save_stored_tokens,
+     _save_token,
+     get_stored_tokens,
+ )
+ from .utils._deprecation import _deprecate_arguments, _deprecate_positional_args
+
+
+ logger = logging.get_logger(__name__)
+
+ _HF_LOGO_ASCII = """
+     _|    _|  _|    _|    _|_|_|    _|_|_|  _|_|_|  _|      _|    _|_|_|      _|_|_|_|    _|_|      _|_|_|  _|_|_|_|
+     _|    _|  _|    _|  _|        _|          _|    _|_|    _|  _|            _|        _|    _|  _|        _|
+     _|_|_|_|  _|    _|  _|  _|_|  _|  _|_|    _|    _|  _|  _|  _|  _|_|      _|_|_|    _|_|_|_|  _|        _|_|_|
+     _|    _|  _|    _|  _|    _|  _|    _|    _|    _|    _|_|  _|    _|      _|        _|    _|  _|        _|
+     _|    _|    _|_|      _|_|_|    _|_|_|  _|_|_|  _|      _|    _|_|_|      _|        _|    _|    _|_|_|  _|_|_|_|
+ """
+
+
+ @_deprecate_arguments(
+     version="1.0",
+     deprecated_args="write_permission",
+     custom_message="Fine-grained tokens added complexity to the permissions, making it irrelevant to check if a token has 'write' access.",
+ )
+ @_deprecate_positional_args(version="1.0")
+ def login(
+     token: Optional[str] = None,
+     *,
+     add_to_git_credential: bool = False,
+     new_session: bool = True,
+     write_permission: bool = False,
+ ) -> None:
+     """Log the machine in to access the Hub.
+
+     The `token` is persisted in cache and set as a git credential. Once done, the machine
+     is logged in and the access token will be available across all `huggingface_hub`
+     components. If `token` is not provided, the user will be prompted for it either with
+     a widget (in a notebook) or via the terminal.
+
+     To log in from outside of a script, one can also use `huggingface-cli login`, a CLI
+     command that wraps [`login`].
+
+     <Tip>
+
+     [`login`] is a drop-in replacement method for [`notebook_login`] as it wraps and
+     extends its capabilities.
+
+     </Tip>
+
+     <Tip>
+
+     When the token is not passed, [`login`] will automatically detect if the script runs
+     in a notebook or not. However, this detection might not be accurate due to the
+     variety of notebooks that exist nowadays. If that is the case, you can always force
+     the UI by using [`notebook_login`] or [`interpreter_login`].
+
+     </Tip>
+
+     Args:
+         token (`str`, *optional*):
+             User access token to generate from https://huggingface.co/settings/token.
+         add_to_git_credential (`bool`, defaults to `False`):
+             If `True`, token will be set as git credential. If no git credential helper
+             is configured, a warning will be displayed to the user. If `token` is `None`,
+             the value of `add_to_git_credential` is ignored and the end user will be
+             prompted again.
+         new_session (`bool`, defaults to `True`):
+             If `True`, will request a token even if one is already saved on the machine.
+         write_permission (`bool`):
+             Ignored and deprecated argument.
+     Raises:
+         [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
+             If an organization token is passed. Only personal account tokens are valid
+             to log in.
+         [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
+             If token is invalid.
+         [`ImportError`](https://docs.python.org/3/library/exceptions.html#ImportError)
+             If running in a notebook but `ipywidgets` is not installed.
+     """
+     if token is not None:
+         if not add_to_git_credential:
+             logger.info(
+                 "The token has not been saved to the git credentials helper. Pass "
+                 "`add_to_git_credential=True` in this function directly or "
+                 "`--add-to-git-credential` if using via `huggingface-cli` if "
+                 "you want to set the git credential as well."
+             )
+         _login(token, add_to_git_credential=add_to_git_credential)
+     elif is_notebook():
+         notebook_login(new_session=new_session)
+     else:
+         interpreter_login(new_session=new_session)
+
+
+ def logout(token_name: Optional[str] = None) -> None:
+     """Log the machine out from the Hub.
+
+     Token is deleted from the machine and removed from git credential.
+
+     Args:
+         token_name (`str`, *optional*):
+             Name of the access token to logout from. If `None`, will logout from all saved access tokens.
+     Raises:
+         [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError):
+             If the access token name is not found.
+     """
+     if get_token() is None and not get_stored_tokens():  # No active token and no saved access tokens
+         logger.warning("Not logged in!")
+         return
+     if not token_name:
+         # Delete all saved access tokens and token
+         for file_path in (constants.HF_TOKEN_PATH, constants.HF_STORED_TOKENS_PATH):
+             try:
+                 Path(file_path).unlink()
+             except FileNotFoundError:
+                 pass
+         logger.info("Successfully logged out from all access tokens.")
+     else:
+         _logout_from_token(token_name)
+         logger.info(f"Successfully logged out from access token: {token_name}.")
+
+     unset_git_credential()
+
+     # Check if still logged in
+     if _get_token_from_google_colab() is not None:
+         raise EnvironmentError(
+             "You are automatically logged in using a Google Colab secret.\n"
+             "To log out, you must unset the `HF_TOKEN` secret in your Colab settings."
+         )
+     if _get_token_from_environment() is not None:
+         raise EnvironmentError(
+             "Token has been deleted from your machine but you are still logged in.\n"
+             "To log out, you must clear out both `HF_TOKEN` and `HUGGING_FACE_HUB_TOKEN` environment variables."
+         )
+
+
+ def auth_switch(token_name: str, add_to_git_credential: bool = False) -> None:
+     """Switch to a different access token.
+
+     Args:
+         token_name (`str`):
+             Name of the access token to switch to.
+         add_to_git_credential (`bool`, defaults to `False`):
+             If `True`, token will be set as git credential. If no git credential helper
+             is configured, a warning will be displayed to the user. If `token` is `None`,
+             the value of `add_to_git_credential` is ignored and the end user will be
+             prompted again.
+
+     Raises:
+         [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError):
+             If the access token name is not found.
+     """
+     token = _get_token_by_name(token_name)
+     if not token:
+         raise ValueError(f"Access token {token_name} not found in {constants.HF_STORED_TOKENS_PATH}")
+     # Write token to HF_TOKEN_PATH
+     _set_active_token(token_name, add_to_git_credential)
+     logger.info(f"The current active token is: {token_name}")
+     token_from_environment = _get_token_from_environment()
+     if token_from_environment is not None and token_from_environment != token:
+         logger.warning(
+             "The environment variable `HF_TOKEN` is set and will override the access token you've just switched to."
+         )
+
+
+ def auth_list() -> None:
+     """List all stored access tokens."""
+     tokens = get_stored_tokens()
+
+     if not tokens:
+         logger.info("No access tokens found.")
+         return
+     # Find current token
+     current_token = get_token()
+     current_token_name = None
+     for token_name in tokens:
+         if tokens.get(token_name) == current_token:
+             current_token_name = token_name
+     # Print header
+     max_offset = max(len("token"), max(len(token) for token in tokens)) + 2
+     print(f" {{:<{max_offset}}}| {{:<15}}".format("name", "token"))
+     print("-" * (max_offset + 2) + "|" + "-" * 15)
+
+     # Print saved access tokens
+     for token_name in tokens:
+         token = tokens.get(token_name, "<not set>")
+         masked_token = f"{token[:3]}****{token[-4:]}" if token != "<not set>" else token
+         is_current = "*" if token == current_token else " "
+
+         print(f"{is_current} {{:<{max_offset}}}| {{:<15}}".format(token_name, masked_token))
+
+     if _get_token_from_environment():
+         logger.warning(
+             "\nNote: Environment variable `HF_TOKEN` is set and is the current active token independently from the stored tokens listed above."
+         )
+     elif current_token_name is None:
+         logger.warning(
+             "\nNote: No active token is set and no environment variable `HF_TOKEN` is found. Use `huggingface-cli login` to log in."
+         )
+
+
+ ###
+ # Interpreter-based login (text)
+ ###
+
+
+ @_deprecate_arguments(
+     version="1.0",
+     deprecated_args="write_permission",
+     custom_message="Fine-grained tokens added complexity to the permissions, making it irrelevant to check if a token has 'write' access.",
+ )
+ @_deprecate_positional_args(version="1.0")
+ def interpreter_login(*, new_session: bool = True, write_permission: bool = False) -> None:
+     """
+     Displays a prompt to log in to the HF website and store the token.
+
+     This is equivalent to [`login`] without passing a token when not run in a notebook.
+     [`interpreter_login`] is useful if you want to force the use of the terminal prompt
+     instead of a notebook widget.
+
+     For more details, see [`login`].
+
+     Args:
+         new_session (`bool`, defaults to `True`):
+             If `True`, will request a token even if one is already saved on the machine.
+         write_permission (`bool`):
+             Ignored and deprecated argument.
+     """
+     if not new_session and get_token() is not None:
+         logger.info("User is already logged in.")
+         return
+
+     from .commands.delete_cache import _ask_for_confirmation_no_tui
+
+     print(_HF_LOGO_ASCII)
+     if get_token() is not None:
+         logger.info(
+             "    A token is already saved on your machine. Run `huggingface-cli"
+             " whoami` to get more information or `huggingface-cli logout` if you want"
+             " to log out."
+         )
+         logger.info("    Setting a new token will erase the existing one.")
+
+     logger.info(
+         "    To log in, `huggingface_hub` requires a token generated from https://huggingface.co/settings/tokens ."
+     )
+     if os.name == "nt":
+         logger.info("Token can be pasted using 'Right-Click'.")
+     token = getpass("Enter your token (input will not be visible): ")
+     add_to_git_credential = _ask_for_confirmation_no_tui("Add token as git credential?")
+
+     _login(token=token, add_to_git_credential=add_to_git_credential)
+
+
+ ###
+ # Notebook-based login (widget)
+ ###
+
+ NOTEBOOK_LOGIN_PASSWORD_HTML = """<center> <img
+ src=https://huggingface.co/front/assets/huggingface_logo-noborder.svg
+ alt='Hugging Face'> <br> Immediately click login after typing your password or
+ it might be stored in plain text in this notebook file. </center>"""
+
+
+ NOTEBOOK_LOGIN_TOKEN_HTML_START = """<center> <img
+ src=https://huggingface.co/front/assets/huggingface_logo-noborder.svg
+ alt='Hugging Face'> <br> Copy a token from <a
+ href="https://huggingface.co/settings/tokens" target="_blank">your Hugging Face
+ tokens page</a> and paste it below. <br> Immediately click login after copying
+ your token or it might be stored in plain text in this notebook file. </center>"""
+
+
+ NOTEBOOK_LOGIN_TOKEN_HTML_END = """
+ <b>Pro Tip:</b> If you don't already have one, you can create a dedicated
+ 'notebooks' token with 'write' access, that you can then easily reuse for all
+ notebooks. </center>"""
+
+
+ @_deprecate_arguments(
+     version="1.0",
+     deprecated_args="write_permission",
+     custom_message="Fine-grained tokens added complexity to the permissions, making it irrelevant to check if a token has 'write' access.",
+ )
+ @_deprecate_positional_args(version="1.0")
+ def notebook_login(*, new_session: bool = True, write_permission: bool = False) -> None:
+     """
+     Displays a widget to log in to the HF website and store the token.
+
+     This is equivalent to [`login`] without passing a token when run in a notebook.
+     [`notebook_login`] is useful if you want to force the use of the notebook widget
+     instead of a prompt in the terminal.
+
+     For more details, see [`login`].
+
+     Args:
+         new_session (`bool`, defaults to `True`):
+             If `True`, will request a token even if one is already saved on the machine.
+         write_permission (`bool`):
+             Ignored and deprecated argument.
+     """
+     try:
+         import ipywidgets.widgets as widgets  # type: ignore
+         from IPython.display import display  # type: ignore
+     except ImportError:
+         raise ImportError(
+             "The `notebook_login` function can only be used in a notebook (Jupyter or"
+             " Colab) and you need the `ipywidgets` module: `pip install ipywidgets`."
+         )
+     if not new_session and get_token() is not None:
+         logger.info("User is already logged in.")
+         return
+
+     box_layout = widgets.Layout(display="flex", flex_flow="column", align_items="center", width="50%")
+
+     token_widget = widgets.Password(description="Token:")
+     git_checkbox_widget = widgets.Checkbox(value=True, description="Add token as git credential?")
+     token_finish_button = widgets.Button(description="Login")
+
+     login_token_widget = widgets.VBox(
+         [
+             widgets.HTML(NOTEBOOK_LOGIN_TOKEN_HTML_START),
+             token_widget,
+             git_checkbox_widget,
+             token_finish_button,
+             widgets.HTML(NOTEBOOK_LOGIN_TOKEN_HTML_END),
+         ],
+         layout=box_layout,
+     )
+     display(login_token_widget)
+
+     # On click events
+     def login_token_event(t):
+         """Event handler for the login button."""
+         token = token_widget.value
+         add_to_git_credential = git_checkbox_widget.value
+         # Erase token and clear value to make sure it's not saved in the notebook.
+         token_widget.value = ""
+         # Hide inputs
+         login_token_widget.children = [widgets.Label("Connecting...")]
+         try:
+             with capture_output() as captured:
+                 _login(token, add_to_git_credential=add_to_git_credential)
+             message = captured.getvalue()
+         except Exception as error:
+             message = str(error)
+         # Print result (success message or error)
+         login_token_widget.children = [widgets.Label(line) for line in message.split("\n") if line.strip()]
+
+     token_finish_button.on_click(login_token_event)
+
+
+ ###
+ # Login private helpers
+ ###
+
+
+ def _login(
+     token: str,
+     add_to_git_credential: bool,
+ ) -> None:
+     from .hf_api import whoami  # avoid circular import
+
+     if token.startswith("api_org"):
+         raise ValueError("You must use your personal account token, not an organization token.")
+
+     token_info = whoami(token)
+     permission = token_info["auth"]["accessToken"]["role"]
+     logger.info(f"Token is valid (permission: {permission}).")
+
+     token_name = token_info["auth"]["accessToken"]["displayName"]
+     # Store token locally
+     _save_token(token=token, token_name=token_name)
+     # Set active token
+     _set_active_token(token_name=token_name, add_to_git_credential=add_to_git_credential)
+     logger.info("Login successful.")
+     if _get_token_from_environment():
+         logger.warning(
+             "Note: Environment variable `HF_TOKEN` is set and is the current active token independently from the token you've just configured."
+         )
+     else:
+         logger.info(f"The current active token is: `{token_name}`")
+
+
+ def _logout_from_token(token_name: str) -> None:
+     """Logout from a specific access token.
+
+     Args:
+         token_name (`str`):
+             The name of the access token to logout from.
+     Raises:
+         [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError):
+             If the access token name is not found.
+     """
+     stored_tokens = get_stored_tokens()
+     # If there are no access tokens saved or the access token name is not found, do nothing
+     if not stored_tokens or token_name not in stored_tokens:
+         return
+
+     token = stored_tokens.pop(token_name)
+     _save_stored_tokens(stored_tokens)
+
+     if token == _get_token_from_file():
+         logger.warning(f"Active token '{token_name}' has been deleted.")
+         Path(constants.HF_TOKEN_PATH).unlink(missing_ok=True)
+
+
+ def _set_active_token(
+     token_name: str,
+     add_to_git_credential: bool,
+ ) -> None:
+     """Set the active access token.
+
+     Args:
+         token_name (`str`):
+             The name of the token to set as active.
+     """
+     token = _get_token_by_name(token_name)
+     if not token:
+         raise ValueError(f"Token {token_name} not found in {constants.HF_STORED_TOKENS_PATH}")
+     if add_to_git_credential:
+         if _is_git_credential_helper_configured():
+             set_git_credential(token)
+             logger.info(
+                 "Your token has been saved in your configured git credential helpers"
+                 + f" ({','.join(list_credential_helpers())})."
+             )
+         else:
+             logger.warning("Token has not been saved to git credential helper.")
+     # Write token to HF_TOKEN_PATH
+     path = Path(constants.HF_TOKEN_PATH)
+     path.parent.mkdir(parents=True, exist_ok=True)
+     path.write_text(token)
+     logger.info(f"Your token has been saved to {constants.HF_TOKEN_PATH}")
+
+
+ def _is_git_credential_helper_configured() -> bool:
+     """Check if a git credential helper is configured.
+
+     Warns user if not the case (except for Google Colab where "store" is set by default
+     by `huggingface_hub`).
+     """
+     helpers = list_credential_helpers()
+     if len(helpers) > 0:
+         return True  # Do not warn: at least 1 helper is set
+
+     # Only in Google Colab to avoid the warning message
+     # See https://github.com/huggingface/huggingface_hub/issues/1043#issuecomment-1247010710
+     if is_google_colab():
+         _set_store_as_git_credential_helper_globally()
+         return True  # Do not warn: "store" is used by default in Google Colab
+
+     # Otherwise, warn user
+     print(
+         ANSI.red(
+             "Cannot authenticate through git-credential as no helper is defined on your"
+             " machine.\nYou might have to re-authenticate when pushing to the Hugging"
+             " Face Hub.\nRun the following command in your terminal in case you want to"
+             " set the 'store' credential helper as default.\n\ngit config --global"
+             " credential.helper store\n\nRead"
+             " https://git-scm.com/book/en/v2/Git-Tools-Credential-Storage for more"
+             " details."
+         )
+     )
+     return False
+
+
+ def _set_store_as_git_credential_helper_globally() -> None:
+     """Set globally the credential.helper to `store`.
+
+     To be used only in Google Colab as we assume the user doesn't care about the git
+     credential config. It is the only particular case where we don't want to display the
+     warning message in [`notebook_login()`].
+
+     Related:
+     - https://github.com/huggingface/huggingface_hub/issues/1043
+     - https://github.com/huggingface/huggingface_hub/issues/1051
+     - https://git-scm.com/docs/git-credential-store
+     """
+     try:
+         run_subprocess("git config --global credential.helper store")
+     except subprocess.CalledProcessError as exc:
+         raise EnvironmentError(exc.stderr)
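A short usage sketch of the helpers defined above. The token value is a placeholder; `login` validates it against the Hub (via `whoami`) before storing it, and `auth_switch`/`logout` operate on the stored-token names shown by `auth_list`:

```py
from huggingface_hub import login, logout
from huggingface_hub._login import auth_list, auth_switch  # also re-exported at top level in recent releases

# Log in with an explicit token (placeholder value) and save it as a git credential too.
login(token="hf_xxx", add_to_git_credential=True)

# Inspect stored tokens, then switch the active one by its display name.
auth_list()
auth_switch("my-read-token")

# Remove a single stored token by name; calling logout() with no name removes all of them.
logout(token_name="my-read-token")
```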
.venv/lib/python3.13/site-packages/huggingface_hub/_oauth.py ADDED
@@ -0,0 +1,464 @@
+ import datetime
+ import hashlib
+ import logging
+ import os
+ import time
+ import urllib.parse
+ import warnings
+ from dataclasses import dataclass
+ from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union
+
+ from . import constants
+ from .hf_api import whoami
+ from .utils import experimental, get_token
+
+
+ logger = logging.getLogger(__name__)
+
+ if TYPE_CHECKING:
+     import fastapi
+
+
+ @dataclass
+ class OAuthOrgInfo:
+     """
+     Information about an organization linked to a user logged in with OAuth.
+
+     Attributes:
+         sub (`str`):
+             Unique identifier for the org. OpenID Connect field.
+         name (`str`):
+             The org's full name. OpenID Connect field.
+         preferred_username (`str`):
+             The org's username. OpenID Connect field.
+         picture (`str`):
+             The org's profile picture URL. OpenID Connect field.
+         is_enterprise (`bool`):
+             Whether the org is an enterprise org. Hugging Face field.
+         can_pay (`Optional[bool]`, *optional*):
+             Whether the org has a payment method set up. Hugging Face field.
+         role_in_org (`Optional[str]`, *optional*):
+             The user's role in the org. Hugging Face field.
+         pending_sso (`Optional[bool]`, *optional*):
+             Indicates if the user granted the OAuth app access to the org but didn't complete SSO. Hugging Face field.
+         missing_mfa (`Optional[bool]`, *optional*):
+             Indicates if the user granted the OAuth app access to the org but didn't complete MFA. Hugging Face field.
+     """
+
+     sub: str
+     name: str
+     preferred_username: str
+     picture: str
+     is_enterprise: bool
+     can_pay: Optional[bool] = None
+     role_in_org: Optional[str] = None
+     pending_sso: Optional[bool] = None
+     missing_mfa: Optional[bool] = None
+
+
+ @dataclass
+ class OAuthUserInfo:
+     """
+     Information about a user logged in with OAuth.
+
+     Attributes:
+         sub (`str`):
+             Unique identifier for the user, even in case of rename. OpenID Connect field.
+         name (`str`):
+             The user's full name. OpenID Connect field.
+         preferred_username (`str`):
+             The user's username. OpenID Connect field.
+         email_verified (`Optional[bool]`, *optional*):
+             Indicates if the user's email is verified. OpenID Connect field.
+         email (`Optional[str]`, *optional*):
+             The user's email address. OpenID Connect field.
+         picture (`str`):
+             The user's profile picture URL. OpenID Connect field.
+         profile (`str`):
+             The user's profile URL. OpenID Connect field.
+         website (`Optional[str]`, *optional*):
+             The user's website URL. OpenID Connect field.
+         is_pro (`bool`):
+             Whether the user is a pro user. Hugging Face field.
+         can_pay (`Optional[bool]`, *optional*):
+             Whether the user has a payment method set up. Hugging Face field.
+         orgs (`Optional[List[OAuthOrgInfo]]`, *optional*):
+             List of organizations the user is part of. Hugging Face field.
+     """
+
+     sub: str
+     name: str
+     preferred_username: str
+     email_verified: Optional[bool]
+     email: Optional[str]
+     picture: str
+     profile: str
+     website: Optional[str]
+     is_pro: bool
+     can_pay: Optional[bool]
+     orgs: Optional[List[OAuthOrgInfo]]
+
+
+ @dataclass
+ class OAuthInfo:
+     """
+     Information about the OAuth login.
+
+     Attributes:
+         access_token (`str`):
+             The access token.
+         access_token_expires_at (`datetime.datetime`):
+             The expiration date of the access token.
+         user_info ([`OAuthUserInfo`]):
+             The user information.
+         state (`str`, *optional*):
+             State passed to the OAuth provider in the original request to the OAuth provider.
+         scope (`str`):
+             Granted scope.
+     """
+
+     access_token: str
+     access_token_expires_at: datetime.datetime
+     user_info: OAuthUserInfo
+     state: Optional[str]
+     scope: str
+
+
+ @experimental
+ def attach_huggingface_oauth(app: "fastapi.FastAPI", route_prefix: str = "/"):
+     """
+     Add OAuth endpoints to a FastAPI app to enable OAuth login with Hugging Face.
+
+     How to use:
+     - Call this method on your FastAPI app to add the OAuth endpoints.
+     - Inside your route handlers, call `parse_huggingface_oauth(request)` to retrieve the OAuth info.
+     - If user is logged in, an [`OAuthInfo`] object is returned with the user's info. If not, `None` is returned.
+     - In your app, make sure to add links to `/oauth/huggingface/login` and `/oauth/huggingface/logout` for the user to log in and out.
+
+     Example:
+     ```py
+     from fastapi import FastAPI, Request
+     from huggingface_hub import attach_huggingface_oauth, parse_huggingface_oauth
+
+     # Create a FastAPI app
+     app = FastAPI()
+
+     # Add OAuth endpoints to the FastAPI app
+     attach_huggingface_oauth(app)
+
+     # Add a route that greets the user if they are logged in
+     @app.get("/")
+     def greet_json(request: Request):
+         # Retrieve the OAuth info from the request
+         oauth_info = parse_huggingface_oauth(request)  # e.g. OAuthInfo dataclass
+         if oauth_info is None:
+             return {"msg": "Not logged in!"}
+         return {"msg": f"Hello, {oauth_info.user_info.preferred_username}!"}
+     ```
+     """
+     # TODO: handle generic case (handling OAuth in a non-Space environment with custom dev values) (low priority)
+
+     # Add SessionMiddleware to the FastAPI app to store the OAuth info in the session.
+     # Session Middleware requires a secret key to sign the cookies. Let's use a hash
+     # of the OAuth secret key to make it unique to the Space + updated in case OAuth
+     # config gets updated. When run locally, we use an empty string as a secret key.
+     try:
+         from starlette.middleware.sessions import SessionMiddleware
+     except ImportError as e:
+         raise ImportError(
+             "Cannot initialize OAuth due to a missing library. Please run `pip install huggingface_hub[oauth]` or add "
+             "`huggingface_hub[oauth]` to your requirements.txt file in order to install the required dependencies."
+         ) from e
+     session_secret = (constants.OAUTH_CLIENT_SECRET or "") + "-v1"
+     app.add_middleware(
+         SessionMiddleware,  # type: ignore[arg-type]
+         secret_key=hashlib.sha256(session_secret.encode()).hexdigest(),
+         same_site="none",
+         https_only=True,
+     )  # type: ignore
+
+     # Add OAuth endpoints to the FastAPI app:
+     # - {route_prefix}/oauth/huggingface/login
+     # - {route_prefix}/oauth/huggingface/callback
+     # - {route_prefix}/oauth/huggingface/logout
+     # If the app is running in a Space, OAuth is enabled normally.
+     # Otherwise, we mock the endpoints to make the user log in with a fake user profile - without any calls to hf.co.
+     route_prefix = route_prefix.strip("/")
+     if os.getenv("SPACE_ID") is not None:
+         logger.info("OAuth is enabled in the Space. Adding OAuth routes.")
+         _add_oauth_routes(app, route_prefix=route_prefix)
+     else:
+         logger.info("App is not running in a Space. Adding mocked OAuth routes.")
+         _add_mocked_oauth_routes(app, route_prefix=route_prefix)
+
+
+ def parse_huggingface_oauth(request: "fastapi.Request") -> Optional[OAuthInfo]:
+     """
+     Returns the information from a logged in user as a [`OAuthInfo`] object.
+
+     For flexibility and future-proofing, this method is very lax in its parsing and does not raise errors.
+     Missing fields are set to `None` without a warning.
+
+     Returns `None` if the user is not logged in (no info in session cookie).
+
+     See [`attach_huggingface_oauth`] for an example on how to use this method.
+     """
+     if "oauth_info" not in request.session:
+         logger.debug("No OAuth info in session.")
+         return None
+
+     logger.debug("Parsing OAuth info from session.")
+     oauth_data = request.session["oauth_info"]
+     user_data = oauth_data.get("userinfo", {})
+     orgs_data = user_data.get("orgs", [])
+
+     orgs = (
+         [
+             OAuthOrgInfo(
+                 sub=org.get("sub"),
+                 name=org.get("name"),
+                 preferred_username=org.get("preferred_username"),
+                 picture=org.get("picture"),
+                 is_enterprise=org.get("isEnterprise"),
+                 can_pay=org.get("canPay"),
+                 role_in_org=org.get("roleInOrg"),
+                 pending_sso=org.get("pendingSSO"),
+                 missing_mfa=org.get("missingMFA"),
+             )
+             for org in orgs_data
+         ]
+         if orgs_data
+         else None
+     )
+
+     user_info = OAuthUserInfo(
+         sub=user_data.get("sub"),
+         name=user_data.get("name"),
+         preferred_username=user_data.get("preferred_username"),
+         email_verified=user_data.get("email_verified"),
+         email=user_data.get("email"),
+         picture=user_data.get("picture"),
+         profile=user_data.get("profile"),
+         website=user_data.get("website"),
+         is_pro=user_data.get("isPro"),
+         can_pay=user_data.get("canPay"),
+         orgs=orgs,
+     )
+
+     return OAuthInfo(
+         access_token=oauth_data.get("access_token"),
+         access_token_expires_at=datetime.datetime.fromtimestamp(oauth_data.get("expires_at")),
+         user_info=user_info,
+         state=oauth_data.get("state"),
+         scope=oauth_data.get("scope"),
+     )
+
+
+ def _add_oauth_routes(app: "fastapi.FastAPI", route_prefix: str) -> None:
+     """Add OAuth routes to the FastAPI app (login, callback handler and logout)."""
+     try:
+         import fastapi
+         from authlib.integrations.base_client.errors import MismatchingStateError
+         from authlib.integrations.starlette_client import OAuth
+         from fastapi.responses import RedirectResponse
+     except ImportError as e:
+         raise ImportError(
+             "Cannot initialize OAuth due to a missing library. Please run `pip install huggingface_hub[oauth]` or add "
+             "`huggingface_hub[oauth]` to your requirements.txt file."
+         ) from e
+
+     # Check environment variables
+     msg = (
+         "OAuth is required but '{}' environment variable is not set. Make sure you've enabled OAuth in your Space by"
+         " setting `hf_oauth: true` in the Space metadata."
+     )
+     if constants.OAUTH_CLIENT_ID is None:
+         raise ValueError(msg.format("OAUTH_CLIENT_ID"))
+     if constants.OAUTH_CLIENT_SECRET is None:
+         raise ValueError(msg.format("OAUTH_CLIENT_SECRET"))
+     if constants.OAUTH_SCOPES is None:
+         raise ValueError(msg.format("OAUTH_SCOPES"))
+     if constants.OPENID_PROVIDER_URL is None:
+         raise ValueError(msg.format("OPENID_PROVIDER_URL"))
+
+     # Register OAuth server
+     oauth = OAuth()
+     oauth.register(
+         name="huggingface",
+         client_id=constants.OAUTH_CLIENT_ID,
+         client_secret=constants.OAUTH_CLIENT_SECRET,
+         client_kwargs={"scope": constants.OAUTH_SCOPES},
+         server_metadata_url=constants.OPENID_PROVIDER_URL + "/.well-known/openid-configuration",
+     )
+
+     login_uri, callback_uri, logout_uri = _get_oauth_uris(route_prefix)
+
+     # Register OAuth endpoints
+     @app.get(login_uri)
+     async def oauth_login(request: fastapi.Request) -> RedirectResponse:
+         """Endpoint that redirects to HF OAuth page."""
+         redirect_uri = _generate_redirect_uri(request)
+         return await oauth.huggingface.authorize_redirect(request, redirect_uri)  # type: ignore
+
+     @app.get(callback_uri)
+     async def oauth_redirect_callback(request: fastapi.Request) -> RedirectResponse:
+         """Endpoint that handles the OAuth callback."""
+         try:
+             oauth_info = await oauth.huggingface.authorize_access_token(request)  # type: ignore
+         except MismatchingStateError:
+             # Parse query params
+             nb_redirects = int(request.query_params.get("_nb_redirects", 0))
+             target_url = request.query_params.get("_target_url")
+
+             # Build redirect URI with the same query params as before and bump nb_redirects count
+             query_params: Dict[str, Union[int, str]] = {"_nb_redirects": nb_redirects + 1}
+             if target_url:
+                 query_params["_target_url"] = target_url
+
+             redirect_uri = f"{login_uri}?{urllib.parse.urlencode(query_params)}"
+
+             # If the user is redirected more than 3 times, it is very likely that the cookie is not working properly
+             # (e.g. browser is blocking third-party cookies in iframe). In this case, redirect the user in the
+             # non-iframe view.
+             if nb_redirects > constants.OAUTH_MAX_REDIRECTS:
+                 host = os.environ.get("SPACE_HOST")
+                 if host is None:  # cannot happen in a Space
+                     raise RuntimeError(
+                         "App is not running in a Space (SPACE_HOST environment variable is not set). Cannot redirect to non-iframe view."
+                     ) from None
+                 host_url = "https://" + host.rstrip("/")
+                 return RedirectResponse(host_url + redirect_uri)
+
+             # Redirect the user to the login page again
+             return RedirectResponse(redirect_uri)
+
+         # OAuth login worked => store the user info in the session and redirect
+         logger.debug("Successfully logged in with OAuth. Storing user info in session.")
+         request.session["oauth_info"] = oauth_info
+         return RedirectResponse(_get_redirect_target(request))
+
+     @app.get(logout_uri)
+     async def oauth_logout(request: fastapi.Request) -> RedirectResponse:
+         """Endpoint that logs out the user (e.g. delete info from cookie session)."""
+         logger.debug("Logged out with OAuth. Removing user info from session.")
+         request.session.pop("oauth_info", None)
+         return RedirectResponse(_get_redirect_target(request))
+
+
+ def _add_mocked_oauth_routes(app: "fastapi.FastAPI", route_prefix: str = "/") -> None:
+     """Add fake oauth routes if app is run locally and OAuth is enabled.
+
+     Using OAuth will have the same behavior as in a Space but instead of authenticating with HF, a mocked user profile
+     is added to the session.
+     """
+     try:
+         import fastapi
+         from fastapi.responses import RedirectResponse
+         from starlette.datastructures import URL
+     except ImportError as e:
+         raise ImportError(
+             "Cannot initialize OAuth due to a missing library. Please run `pip install huggingface_hub[oauth]` or add "
+             "`huggingface_hub[oauth]` to your requirements.txt file."
+         ) from e
+
+     warnings.warn(
+         "OAuth is not supported outside of a Space environment. To help you debug your app locally, the oauth endpoints"
+         " are mocked to return your profile and token. To make it work, your machine must be logged in to Hugging Face."
+     )
+     mocked_oauth_info = _get_mocked_oauth_info()
+
+     login_uri, callback_uri, logout_uri = _get_oauth_uris(route_prefix)
+
+     # Define OAuth routes
+     @app.get(login_uri)
+     async def oauth_login(request: fastapi.Request) -> RedirectResponse:
+         """Fake endpoint that redirects to HF OAuth page."""
+         # Define target (where to redirect after login)
+         redirect_uri = _generate_redirect_uri(request)
+         return RedirectResponse(callback_uri + "?" + urllib.parse.urlencode({"_target_url": redirect_uri}))
+
+     @app.get(callback_uri)
+     async def oauth_redirect_callback(request: fastapi.Request) -> RedirectResponse:
+         """Endpoint that handles the OAuth callback."""
+         request.session["oauth_info"] = mocked_oauth_info
+         return RedirectResponse(_get_redirect_target(request))
+
+     @app.get(logout_uri)
+     async def oauth_logout(request: fastapi.Request) -> RedirectResponse:
+         """Endpoint that logs out the user (e.g. delete cookie session)."""
+         request.session.pop("oauth_info", None)
+         logout_url = URL("/").include_query_params(**request.query_params)
+         return RedirectResponse(url=logout_url, status_code=302)  # see https://github.com/gradio-app/gradio/pull/9659
+
+
+ def _generate_redirect_uri(request: "fastapi.Request") -> str:
+     if "_target_url" in request.query_params:
+         # if `_target_url` already in query params => respect it
+         target = request.query_params["_target_url"]
+     else:
+         # otherwise => keep query params
+         target = "/?" + urllib.parse.urlencode(request.query_params)
+
+     redirect_uri = request.url_for("oauth_redirect_callback").include_query_params(_target_url=target)
+     redirect_uri_as_str = str(redirect_uri)
+     if redirect_uri.netloc.endswith(".hf.space"):
+         # In Space, FastAPI redirects as http but we want https
+         redirect_uri_as_str = redirect_uri_as_str.replace("http://", "https://")
+     return redirect_uri_as_str
+
+
+ def _get_redirect_target(request: "fastapi.Request", default_target: str = "/") -> str:
+     return request.query_params.get("_target_url", default_target)
+
+
+ def _get_mocked_oauth_info() -> Dict:
+     token = get_token()
+     if token is None:
+         raise ValueError(
+             "Your machine must be logged in to HF to debug an OAuth app locally. Please"
+             " run `huggingface-cli login` or set `HF_TOKEN` as environment variable "
+             "with one of your access tokens. You can generate a new token in your "
+             "settings page (https://huggingface.co/settings/tokens)."
+         )
+
+     user = whoami()
+     if user["type"] != "user":
+         raise ValueError(
+             "Your machine is not logged in with a personal account. Please use a "
+             "personal access token. You can generate a new token in your settings page"
+             " (https://huggingface.co/settings/tokens)."
+         )
+
+     return {
+         "access_token": token,
+         "token_type": "bearer",
+         "expires_in": 8 * 60 * 60,  # 8 hours
+         "id_token": "FOOBAR",
+         "scope": "openid profile",
+         "refresh_token": "hf_oauth__refresh_token",
+         "expires_at": int(time.time()) + 8 * 60 * 60,  # 8 hours
+         "userinfo": {
+             "sub": "0123456789",
+             "name": user["fullname"],
+             "preferred_username": user["name"],
+             "profile": f"https://huggingface.co/{user['name']}",
+             "picture": user["avatarUrl"],
+             "website": "",
+             "aud": "00000000-0000-0000-0000-000000000000",
+             "auth_time": 1691672844,
+             "nonce": "aaaaaaaaaaaaaaaaaaa",
+             "iat": 1691672844,
+             "exp": 1691676444,
+             "iss": "https://huggingface.co",
+         },
+     }
+
+
+ def _get_oauth_uris(route_prefix: str = "/") -> Tuple[str, str, str]:
+     route_prefix = route_prefix.strip("/")
+     if route_prefix:
+         route_prefix = f"/{route_prefix}"
+     return (
+         f"{route_prefix}/oauth/huggingface/login",
+         f"{route_prefix}/oauth/huggingface/callback",
+         f"{route_prefix}/oauth/huggingface/logout",
+     )
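Building on the example in `attach_huggingface_oauth`'s docstring, here is a minimal sketch of mounting the OAuth routes under a sub-path. The `/api` prefix and `/api/me` route are arbitrary choices for illustration; per `_get_oauth_uris`, the endpoints then become `/api/oauth/huggingface/{login,callback,logout}`:

```py
import fastapi
from huggingface_hub import attach_huggingface_oauth, parse_huggingface_oauth

app = fastapi.FastAPI()
attach_huggingface_oauth(app, route_prefix="/api")  # mocked automatically outside a Space

@app.get("/api/me")
def me(request: fastapi.Request):
    oauth_info = parse_huggingface_oauth(request)  # None if not logged in
    if oauth_info is None:
        return {"logged_in": False}
    return {"logged_in": True, "username": oauth_info.user_info.preferred_username}
```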
.venv/lib/python3.13/site-packages/huggingface_hub/_snapshot_download.py ADDED
@@ -0,0 +1,338 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from pathlib import Path
3
+ from typing import Dict, Iterable, List, Literal, Optional, Type, Union
4
+
5
+ import requests
6
+ from tqdm.auto import tqdm as base_tqdm
7
+ from tqdm.contrib.concurrent import thread_map
8
+
9
+ from . import constants
10
+ from .errors import (
11
+ GatedRepoError,
12
+ HfHubHTTPError,
13
+ LocalEntryNotFoundError,
14
+ RepositoryNotFoundError,
15
+ RevisionNotFoundError,
16
+ )
17
+ from .file_download import REGEX_COMMIT_HASH, hf_hub_download, repo_folder_name
18
+ from .hf_api import DatasetInfo, HfApi, ModelInfo, RepoFile, SpaceInfo
19
+ from .utils import OfflineModeIsEnabled, filter_repo_objects, logging, validate_hf_hub_args
20
+ from .utils import tqdm as hf_tqdm
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+ VERY_LARGE_REPO_THRESHOLD = 50000 # After this limit, we don't consider `repo_info.siblings` to be reliable enough
26
+
27
+
28
+ @validate_hf_hub_args
29
+ def snapshot_download(
30
+ repo_id: str,
31
+ *,
32
+ repo_type: Optional[str] = None,
33
+ revision: Optional[str] = None,
34
+ cache_dir: Union[str, Path, None] = None,
35
+ local_dir: Union[str, Path, None] = None,
36
+ library_name: Optional[str] = None,
37
+ library_version: Optional[str] = None,
38
+ user_agent: Optional[Union[Dict, str]] = None,
39
+ proxies: Optional[Dict] = None,
40
+ etag_timeout: float = constants.DEFAULT_ETAG_TIMEOUT,
41
+ force_download: bool = False,
42
+ token: Optional[Union[bool, str]] = None,
43
+ local_files_only: bool = False,
44
+ allow_patterns: Optional[Union[List[str], str]] = None,
45
+ ignore_patterns: Optional[Union[List[str], str]] = None,
46
+ max_workers: int = 8,
47
+ tqdm_class: Optional[Type[base_tqdm]] = None,
48
+ headers: Optional[Dict[str, str]] = None,
49
+ endpoint: Optional[str] = None,
50
+ # Deprecated args
51
+ local_dir_use_symlinks: Union[bool, Literal["auto"]] = "auto",
52
+ resume_download: Optional[bool] = None,
53
+ ) -> str:
54
+ """Download repo files.
55
+
56
+ Download a whole snapshot of a repo's files at the specified revision. This is useful when you want all files from
57
+ a repo, because you don't know which ones you will need a priori. All files are nested inside a folder in order
58
+ to keep their actual filename relative to that folder. You can also filter which files to download using
59
+ `allow_patterns` and `ignore_patterns`.
60
+
61
+ If `local_dir` is provided, the file structure from the repo will be replicated in this location. When using this
62
+ option, the `cache_dir` will not be used and a `.cache/huggingface/` folder will be created at the root of `local_dir`
63
+ to store some metadata related to the downloaded files. While this mechanism is not as robust as the main
64
+ cache-system, it's optimized for regularly pulling the latest version of a repository.
65
+
66
+ An alternative would be to clone the repo but this requires git and git-lfs to be installed and properly
67
+ configured. It is also not possible to filter which files to download when cloning a repository using git.
68
+
69
+ Args:
70
+ repo_id (`str`):
71
+ A user or an organization name and a repo name separated by a `/`.
72
+ repo_type (`str`, *optional*):
73
+ Set to `"dataset"` or `"space"` if downloading from a dataset or space,
74
+ `None` or `"model"` if downloading from a model. Default is `None`.
75
+ revision (`str`, *optional*):
76
+ An optional Git revision id which can be a branch name, a tag, or a
77
+ commit hash.
78
+ cache_dir (`str`, `Path`, *optional*):
79
+ Path to the folder where cached files are stored.
80
+ local_dir (`str` or `Path`, *optional*):
81
+ If provided, the downloaded files will be placed under this directory.
82
+ library_name (`str`, *optional*):
83
+ The name of the library to which the object corresponds.
84
+ library_version (`str`, *optional*):
85
+ The version of the library.
86
+ user_agent (`str`, `dict`, *optional*):
87
+ The user-agent info in the form of a dictionary or a string.
88
+ proxies (`dict`, *optional*):
89
+ Dictionary mapping protocol to the URL of the proxy passed to
90
+ `requests.request`.
91
+ etag_timeout (`float`, *optional*, defaults to `10`):
92
+ When fetching ETag, how many seconds to wait for the server to send
93
+ data before giving up which is passed to `requests.request`.
94
+ force_download (`bool`, *optional*, defaults to `False`):
95
+ Whether the file should be downloaded even if it already exists in the local cache.
96
+ token (`str`, `bool`, *optional*):
97
+ A token to be used for the download.
98
+ - If `True`, the token is read from the HuggingFace config
99
+ folder.
100
+ - If a string, it's used as the authentication token.
101
+ headers (`dict`, *optional*):
102
+ Additional headers to include in the request. Those headers take precedence over the others.
103
+ local_files_only (`bool`, *optional*, defaults to `False`):
104
+ If `True`, avoid downloading the file and return the path to the
105
+ local cached file if it exists.
106
+ allow_patterns (`List[str]` or `str`, *optional*):
107
+ If provided, only files matching at least one pattern are downloaded.
108
+ ignore_patterns (`List[str]` or `str`, *optional*):
109
+ If provided, files matching any of the patterns are not downloaded.
110
+ max_workers (`int`, *optional*):
111
+ Number of concurrent threads to download files (1 thread = 1 file download).
112
+ Defaults to 8.
113
+ tqdm_class (`tqdm`, *optional*):
114
+ If provided, overwrites the default behavior for the progress bar. Passed
115
+ argument must inherit from `tqdm.auto.tqdm` or at least mimic its behavior.
116
+ Note that the `tqdm_class` is not passed to each individual download.
117
+ Defaults to the custom HF progress bar that can be disabled by setting
118
+ `HF_HUB_DISABLE_PROGRESS_BARS` environment variable.
119
+
120
+ Returns:
121
+ `str`: folder path of the repo snapshot.
122
+
123
+ Raises:
124
+ [`~utils.RepositoryNotFoundError`]
125
+ If the repository to download from cannot be found. This may be because it doesn't exist,
126
+ or because it is set to `private` and you do not have access.
127
+ [`~utils.RevisionNotFoundError`]
128
+ If the revision to download from cannot be found.
129
+ [`EnvironmentError`](https://docs.python.org/3/library/exceptions.html#EnvironmentError)
130
+ If `token=True` and the token cannot be found.
131
+ [`OSError`](https://docs.python.org/3/library/exceptions.html#OSError) if
132
+ ETag cannot be determined.
133
+ [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
134
+ if some parameter value is invalid.
135
+ """
136
+ if cache_dir is None:
137
+ cache_dir = constants.HF_HUB_CACHE
138
+ if revision is None:
139
+ revision = constants.DEFAULT_REVISION
140
+ if isinstance(cache_dir, Path):
141
+ cache_dir = str(cache_dir)
142
+
143
+ if repo_type is None:
144
+ repo_type = "model"
145
+ if repo_type not in constants.REPO_TYPES:
146
+ raise ValueError(f"Invalid repo type: {repo_type}. Accepted repo types are: {str(constants.REPO_TYPES)}")
147
+
148
+ storage_folder = os.path.join(cache_dir, repo_folder_name(repo_id=repo_id, repo_type=repo_type))
149
+
150
+ api = HfApi(
151
+ library_name=library_name,
152
+ library_version=library_version,
153
+ user_agent=user_agent,
154
+ endpoint=endpoint,
155
+ headers=headers,
156
+ token=token,
157
+ )
158
+
159
+ repo_info: Union[ModelInfo, DatasetInfo, SpaceInfo, None] = None
160
+ api_call_error: Optional[Exception] = None
161
+ if not local_files_only:
162
+ # try/except logic to handle different errors => taken from `hf_hub_download`
163
+ try:
164
+ # if we have internet connection we want to list files to download
165
+ repo_info = api.repo_info(repo_id=repo_id, repo_type=repo_type, revision=revision)
166
+ except (requests.exceptions.SSLError, requests.exceptions.ProxyError):
167
+ # Actually raise for those subclasses of ConnectionError
168
+ raise
169
+ except (
170
+ requests.exceptions.ConnectionError,
171
+ requests.exceptions.Timeout,
172
+ OfflineModeIsEnabled,
173
+ ) as error:
174
+ # Internet connection is down
175
+ # => will try to use local files only
176
+ api_call_error = error
177
+ pass
178
+ except RevisionNotFoundError:
179
+ # The repo was found but the revision doesn't exist on the Hub (never existed or got deleted)
180
+ raise
181
+ except requests.HTTPError as error:
182
+ # Multiple reasons for an http error:
183
+ # - Repository is private and invalid/missing token sent
184
+ # - Repository is gated and invalid/missing token sent
185
+ # - Hub is down (error 500 or 504)
186
+ # => let's switch to 'local_files_only=True' to check if the files are already cached.
187
+ # (if it's not the case, the error will be re-raised)
188
+ api_call_error = error
189
+ pass
190
+
191
+ # At this stage, if `repo_info` is None it means either:
192
+ # - internet connection is down
193
+ # - internet connection is deactivated (local_files_only=True or HF_HUB_OFFLINE=True)
194
+ # - repo is private/gated and invalid/missing token sent
195
+ # - Hub is down
196
+ # => let's look if we can find the appropriate folder in the cache:
197
+ # - if the specified revision is a commit hash, look inside "snapshots".
198
+ # - f the specified revision is a branch or tag, look inside "refs".
199
+ # => if local_dir is not None, we will return the path to the local folder if it exists.
200
+ if repo_info is None:
201
+ # Try to get which commit hash corresponds to the specified revision
202
+ commit_hash = None
203
+ if REGEX_COMMIT_HASH.match(revision):
204
+ commit_hash = revision
205
+ else:
206
+ ref_path = os.path.join(storage_folder, "refs", revision)
207
+ if os.path.exists(ref_path):
208
+ # retrieve commit_hash from refs file
209
+ with open(ref_path) as f:
210
+ commit_hash = f.read()
211
+
212
+ # Try to locate snapshot folder for this commit hash
213
+ if commit_hash is not None and local_dir is None:
214
+ snapshot_folder = os.path.join(storage_folder, "snapshots", commit_hash)
215
+ if os.path.exists(snapshot_folder):
216
+ # Snapshot folder exists => let's return it
217
+ # (but we can't check if all the files are actually there)
218
+ return snapshot_folder
219
+
220
+ # If local_dir is not None, return it if it exists and is not empty
221
+ if local_dir is not None:
222
+ local_dir = Path(local_dir)
223
+ if local_dir.is_dir() and any(local_dir.iterdir()):
224
+ logger.warning(
225
+ f"Returning existing local_dir `{local_dir}` as remote repo cannot be accessed in `snapshot_download` ({api_call_error})."
226
+ )
227
+ return str(local_dir.resolve())
228
+ # If we couldn't find the appropriate folder on disk, raise an error.
229
+ if local_files_only:
230
+ raise LocalEntryNotFoundError(
231
+ "Cannot find an appropriate cached snapshot folder for the specified revision on the local disk and "
232
+ "outgoing traffic has been disabled. To enable repo look-ups and downloads online, pass "
233
+ "'local_files_only=False' as input."
234
+ )
235
+ elif isinstance(api_call_error, OfflineModeIsEnabled):
236
+ raise LocalEntryNotFoundError(
237
+ "Cannot find an appropriate cached snapshot folder for the specified revision on the local disk and "
238
+ "outgoing traffic has been disabled. To enable repo look-ups and downloads online, set "
239
+ "'HF_HUB_OFFLINE=0' as environment variable."
240
+ ) from api_call_error
241
+ elif isinstance(api_call_error, (RepositoryNotFoundError, GatedRepoError)) or (
242
+ isinstance(api_call_error, HfHubHTTPError) and api_call_error.response.status_code == 401
243
+ ):
244
+ # Repo not found, gated, or specific authentication error => let's raise the actual error
245
+ raise api_call_error
246
+ else:
247
+ # Otherwise: most likely a connection issue or Hub downtime => let's warn the user
248
+ raise LocalEntryNotFoundError(
249
+ "An error happened while trying to locate the files on the Hub and we cannot find the appropriate"
250
+ " snapshot folder for the specified revision on the local disk. Please check your internet connection"
251
+ " and try again."
252
+ ) from api_call_error
253
+
254
+ # At this stage, internet connection is up and running
255
+ # => let's download the files!
256
+ assert repo_info.sha is not None, "Repo info returned from server must have a revision sha."
257
+ assert repo_info.siblings is not None, "Repo info returned from server must have a siblings list."
258
+
259
+ # Corner case: on very large repos, the siblings list in `repo_info` might not contain all files.
260
+ # In that case, we need to use the `list_repo_tree` method to prevent caching issues.
261
+ repo_files: Iterable[str] = [f.rfilename for f in repo_info.siblings]
262
+ has_many_files = len(repo_info.siblings) > VERY_LARGE_REPO_THRESHOLD
263
+ if has_many_files:
264
+ logger.info("The repo has more than 50,000 files. Using `list_repo_tree` to ensure all files are listed.")
265
+ repo_files = (
266
+ f.rfilename
267
+ for f in api.list_repo_tree(repo_id=repo_id, recursive=True, revision=revision, repo_type=repo_type)
268
+ if isinstance(f, RepoFile)
269
+ )
270
+
271
+ filtered_repo_files: Iterable[str] = filter_repo_objects(
272
+ items=repo_files,
273
+ allow_patterns=allow_patterns,
274
+ ignore_patterns=ignore_patterns,
275
+ )
276
+
277
+ if not has_many_files:
278
+ filtered_repo_files = list(filtered_repo_files)
279
+ tqdm_desc = f"Fetching {len(filtered_repo_files)} files"
280
+ else:
281
+ tqdm_desc = "Fetching ... files"
282
+
283
+ commit_hash = repo_info.sha
284
+ snapshot_folder = os.path.join(storage_folder, "snapshots", commit_hash)
285
+ # if passed revision is not identical to commit_hash
286
+ # then revision has to be a branch name or tag name.
287
+ # In that case store a ref.
288
+ if revision != commit_hash:
289
+ ref_path = os.path.join(storage_folder, "refs", revision)
290
+ try:
291
+ os.makedirs(os.path.dirname(ref_path), exist_ok=True)
292
+ with open(ref_path, "w") as f:
293
+ f.write(commit_hash)
294
+ except OSError as e:
295
+ logger.warning(f"Ignored error while writing commit hash to {ref_path}: {e}.")
296
+
297
+ # we pass the commit_hash to hf_hub_download
298
+ # so no network call happens if we already
299
+ # have the file locally.
300
+ def _inner_hf_hub_download(repo_file: str):
301
+ return hf_hub_download(
302
+ repo_id,
303
+ filename=repo_file,
304
+ repo_type=repo_type,
305
+ revision=commit_hash,
306
+ endpoint=endpoint,
307
+ cache_dir=cache_dir,
308
+ local_dir=local_dir,
309
+ local_dir_use_symlinks=local_dir_use_symlinks,
310
+ library_name=library_name,
311
+ library_version=library_version,
312
+ user_agent=user_agent,
313
+ proxies=proxies,
314
+ etag_timeout=etag_timeout,
315
+ resume_download=resume_download,
316
+ force_download=force_download,
317
+ token=token,
318
+ headers=headers,
319
+ )
320
+
321
+ if constants.HF_HUB_ENABLE_HF_TRANSFER:
322
+ # when using hf_transfer we don't want extra parallelism
323
+ # from the one hf_transfer provides
324
+ for file in filtered_repo_files:
325
+ _inner_hf_hub_download(file)
326
+ else:
327
+ thread_map(
328
+ _inner_hf_hub_download,
329
+ filtered_repo_files,
330
+ desc=tqdm_desc,
331
+ max_workers=max_workers,
332
+ # User can use its own tqdm class or the default one from `huggingface_hub.utils`
333
+ tqdm_class=tqdm_class or hf_tqdm,
334
+ )
335
+
336
+ if local_dir is not None:
337
+ return str(os.path.realpath(local_dir))
338
+ return snapshot_folder
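
For orientation, here is a minimal usage sketch of the download flow implemented above. The repo id is a placeholder (not part of this diff), network access is assumed, and the call returns the resolved snapshot folder (or `local_dir` when one is passed):

```py
# Minimal sketch, assuming network access; the repo id below is a placeholder.
from huggingface_hub import snapshot_download

# Fetch only the JSON files of a dataset revision into the shared cache.
folder = snapshot_download(
    repo_id="username/my-dataset",  # placeholder
    repo_type="dataset",
    allow_patterns=["*.json"],      # filtered via filter_repo_objects, as above
)
# With the default cache, `folder` looks like:
#   ~/.cache/huggingface/hub/datasets--username--my-dataset/snapshots/<commit_hash>
print(folder)
```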
.venv/lib/python3.13/site-packages/huggingface_hub/_tensorboard_logger.py ADDED
@@ -0,0 +1,194 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """Contains a logger to push training logs to the Hub, using Tensorboard."""
15
+
16
+ from pathlib import Path
17
+ from typing import TYPE_CHECKING, List, Optional, Union
18
+
19
+ from ._commit_scheduler import CommitScheduler
20
+ from .errors import EntryNotFoundError
21
+ from .repocard import ModelCard
22
+ from .utils import experimental
23
+
24
+
25
+ # Depending on the user's setup, SummaryWriter can come either from 'tensorboardX'
26
+ # or from 'torch.utils.tensorboard'. Both are compatible so let's try to load
27
+ # from either of them.
28
+ try:
29
+ from tensorboardX import SummaryWriter
30
+
31
+ is_summary_writer_available = True
32
+
33
+ except ImportError:
34
+ try:
35
+ from torch.utils.tensorboard import SummaryWriter
36
+
37
+ is_summary_writer_available = True
38
+ except ImportError:
39
+ # Dummy class to avoid failing at import. Will raise on instance creation.
40
+ SummaryWriter = object
41
+ is_summary_writer_available = False
42
+
43
+ if TYPE_CHECKING:
44
+ from tensorboardX import SummaryWriter
45
+
46
+
47
+ class HFSummaryWriter(SummaryWriter):
48
+ """
49
+ Wrapper around the tensorboard's `SummaryWriter` to push training logs to the Hub.
50
+
51
+ Data is logged locally and then pushed to the Hub asynchronously. Pushing data to the Hub is done in a separate
52
+ thread to avoid blocking the training script. In particular, if the upload fails for any reason (e.g. a connection
53
+ issue), the main script will not be interrupted. Data is automatically pushed to the Hub every `commit_every`
54
+ minutes (defaults to every 5 minutes).
55
+
56
+ <Tip warning={true}>
57
+
58
+ `HFSummaryWriter` is experimental. Its API is subject to change in the future without prior notice.
59
+
60
+ </Tip>
61
+
62
+ Args:
63
+ repo_id (`str`):
64
+ The id of the repo to which the logs will be pushed.
65
+ logdir (`str`, *optional*):
66
+ The directory where the logs will be written. If not specified, a local directory will be created by the
67
+ underlying `SummaryWriter` object.
68
+ commit_every (`int` or `float`, *optional*):
69
+ The frequency (in minutes) at which the logs will be pushed to the Hub. Defaults to 5 minutes.
70
+ squash_history (`bool`, *optional*):
71
+ Whether to squash the history of the repo after each commit. Defaults to `False`. Squashing commits is
72
+ useful to avoid degraded performances on the repo when it grows too large.
73
+ repo_type (`str`, *optional*):
74
+ The type of the repo to which the logs will be pushed. Defaults to "model".
75
+ repo_revision (`str`, *optional*):
76
+ The revision of the repo to which the logs will be pushed. Defaults to "main".
77
+ repo_private (`bool`, *optional*):
78
+ Whether to make the repo private. If `None` (default), the repo will be public unless the organization's default is private. This value is ignored if the repo already exists.
79
+ path_in_repo (`str`, *optional*):
80
+ The path to the folder in the repo where the logs will be pushed. Defaults to "tensorboard/".
81
+ repo_allow_patterns (`List[str]` or `str`, *optional*):
82
+ A list of patterns to include in the upload. Defaults to `"*.tfevents.*"`. Check out the
83
+ [upload guide](https://huggingface.co/docs/huggingface_hub/guides/upload#upload-a-folder) for more details.
84
+ repo_ignore_patterns (`List[str]` or `str`, *optional*):
85
+ A list of patterns to exclude in the upload. Check out the
86
+ [upload guide](https://huggingface.co/docs/huggingface_hub/guides/upload#upload-a-folder) for more details.
87
+ token (`str`, *optional*):
88
+ Authentication token. Will default to the stored token. See https://huggingface.co/settings/token for more
89
+ details
90
+ kwargs:
91
+ Additional keyword arguments passed to `SummaryWriter`.
92
+
93
+ Examples:
94
+ ```diff
95
+ # Taken from https://pytorch.org/docs/stable/tensorboard.html
96
+ - from torch.utils.tensorboard import SummaryWriter
97
+ + from huggingface_hub import HFSummaryWriter
98
+
99
+ import numpy as np
100
+
101
+ - writer = SummaryWriter()
102
+ + writer = HFSummaryWriter(repo_id="username/my-trained-model")
103
+
104
+ for n_iter in range(100):
105
+ writer.add_scalar('Loss/train', np.random.random(), n_iter)
106
+ writer.add_scalar('Loss/test', np.random.random(), n_iter)
107
+ writer.add_scalar('Accuracy/train', np.random.random(), n_iter)
108
+ writer.add_scalar('Accuracy/test', np.random.random(), n_iter)
109
+ ```
110
+
111
+ ```py
112
+ >>> from huggingface_hub import HFSummaryWriter
113
+
114
+ # Logs are automatically pushed every 15 minutes (5 by default) + when exiting the context manager
115
+ >>> with HFSummaryWriter(repo_id="test_hf_logger", commit_every=15) as logger:
116
+ ... logger.add_scalar("a", 1)
117
+ ... logger.add_scalar("b", 2)
118
+ ```
119
+ """
120
+
121
+ @experimental
122
+ def __new__(cls, *args, **kwargs) -> "HFSummaryWriter":
123
+ if not is_summary_writer_available:
124
+ raise ImportError(
125
+ "You must have `tensorboard` installed to use `HFSummaryWriter`. Please run `pip install --upgrade"
126
+ " tensorboardX` first."
127
+ )
128
+ return super().__new__(cls)
129
+
130
+ def __init__(
131
+ self,
132
+ repo_id: str,
133
+ *,
134
+ logdir: Optional[str] = None,
135
+ commit_every: Union[int, float] = 5,
136
+ squash_history: bool = False,
137
+ repo_type: Optional[str] = None,
138
+ repo_revision: Optional[str] = None,
139
+ repo_private: Optional[bool] = None,
140
+ path_in_repo: Optional[str] = "tensorboard",
141
+ repo_allow_patterns: Optional[Union[List[str], str]] = "*.tfevents.*",
142
+ repo_ignore_patterns: Optional[Union[List[str], str]] = None,
143
+ token: Optional[str] = None,
144
+ **kwargs,
145
+ ):
146
+ # Initialize SummaryWriter
147
+ super().__init__(logdir=logdir, **kwargs)
148
+
149
+ # Check logdir has been correctly initialized and fail early otherwise. In practice, SummaryWriter takes care of it.
150
+ if not isinstance(self.logdir, str):
151
+ raise ValueError(f"`self.logdir` must be a string. Got '{self.logdir}' of type {type(self.logdir)}.")
152
+
153
+ # Append logdir name to `path_in_repo`
154
+ if path_in_repo is None or path_in_repo == "":
155
+ path_in_repo = Path(self.logdir).name
156
+ else:
157
+ path_in_repo = path_in_repo.strip("/") + "/" + Path(self.logdir).name
158
+
159
+ # Initialize scheduler
160
+ self.scheduler = CommitScheduler(
161
+ folder_path=self.logdir,
162
+ path_in_repo=path_in_repo,
163
+ repo_id=repo_id,
164
+ repo_type=repo_type,
165
+ revision=repo_revision,
166
+ private=repo_private,
167
+ token=token,
168
+ allow_patterns=repo_allow_patterns,
169
+ ignore_patterns=repo_ignore_patterns,
170
+ every=commit_every,
171
+ squash_history=squash_history,
172
+ )
173
+
174
+ # Exposing some high-level info at root level
175
+ self.repo_id = self.scheduler.repo_id
176
+ self.repo_type = self.scheduler.repo_type
177
+ self.repo_revision = self.scheduler.revision
178
+
179
+ # Add `hf-summary-writer` tag to the model card metadata
180
+ try:
181
+ card = ModelCard.load(repo_id_or_path=self.repo_id, repo_type=self.repo_type)
182
+ except EntryNotFoundError:
183
+ card = ModelCard("")
184
+ tags = card.data.get("tags", [])
185
+ if "hf-summary-writer" not in tags:
186
+ tags.append("hf-summary-writer")
187
+ card.data["tags"] = tags
188
+ card.push_to_hub(repo_id=self.repo_id, repo_type=self.repo_type)
189
+
190
+ def __exit__(self, exc_type, exc_val, exc_tb):
191
+ """Push to hub in a non-blocking way when exiting the logger's context manager."""
192
+ super().__exit__(exc_type, exc_val, exc_tb)
193
+ future = self.scheduler.trigger()
194
+ future.result()
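
To complement the docstring examples above, a short sketch of the non-default knobs (all parameter names are real; the repo id is a placeholder):

```py
# Sketch assuming `tensorboardX` (or torch with tensorboard) is installed
# and the user is authenticated; the repo id is a placeholder.
from huggingface_hub import HFSummaryWriter

with HFSummaryWriter(
    repo_id="username/tb-logs",   # placeholder
    logdir="runs/exp1",           # basename "exp1" is appended to path_in_repo
    commit_every=10,              # push every 10 minutes instead of 5
    squash_history=True,          # keep the log repo small over long runs
) as writer:
    for step in range(100):
        writer.add_scalar("loss", 1.0 / (step + 1), step)
# Leaving the context triggers a final, blocking push (see __exit__ above).
```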
.venv/lib/python3.13/site-packages/huggingface_hub/_upload_large_folder.py ADDED
@@ -0,0 +1,625 @@
1
+ # coding=utf-8
2
+ # Copyright 2024-present, the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ import enum
16
+ import logging
17
+ import os
18
+ import queue
19
+ import shutil
20
+ import sys
21
+ import threading
22
+ import time
23
+ import traceback
24
+ from datetime import datetime
25
+ from pathlib import Path
26
+ from threading import Lock
27
+ from typing import TYPE_CHECKING, List, Optional, Tuple, Union
28
+ from urllib.parse import quote
29
+
30
+ from . import constants
31
+ from ._commit_api import CommitOperationAdd, UploadInfo, _fetch_upload_modes
32
+ from ._local_folder import LocalUploadFileMetadata, LocalUploadFilePaths, get_local_upload_paths, read_upload_metadata
33
+ from .constants import DEFAULT_REVISION, REPO_TYPES
34
+ from .utils import DEFAULT_IGNORE_PATTERNS, filter_repo_objects, tqdm
35
+ from .utils._cache_manager import _format_size
36
+ from .utils.sha import sha_fileobj
37
+
38
+
39
+ if TYPE_CHECKING:
40
+ from .hf_api import HfApi
41
+
42
+ logger = logging.getLogger(__name__)
43
+
44
+ WAITING_TIME_IF_NO_TASKS = 10 # seconds
45
+ MAX_NB_FILES_FETCH_UPLOAD_MODE = 100
46
+ COMMIT_SIZE_SCALE: List[int] = [20, 50, 75, 100, 125, 200, 250, 400, 600, 1000]
47
+
48
+
49
+ def upload_large_folder_internal(
50
+ api: "HfApi",
51
+ repo_id: str,
52
+ folder_path: Union[str, Path],
53
+ *,
54
+ repo_type: str, # Repo type is required!
55
+ revision: Optional[str] = None,
56
+ private: Optional[bool] = None,
57
+ allow_patterns: Optional[Union[List[str], str]] = None,
58
+ ignore_patterns: Optional[Union[List[str], str]] = None,
59
+ num_workers: Optional[int] = None,
60
+ print_report: bool = True,
61
+ print_report_every: int = 60,
62
+ ):
63
+ """Upload a large folder to the Hub in the most resilient way possible.
64
+
65
+ See [`HfApi.upload_large_folder`] for the full documentation.
66
+ """
67
+ # 1. Check args and setup
68
+ if repo_type is None:
69
+ raise ValueError(
70
+ "For large uploads, `repo_type` is explicitly required. Please set it to `model`, `dataset` or `space`."
71
+ " If you are using the CLI, pass it as `--repo-type=model`."
72
+ )
73
+ if repo_type not in REPO_TYPES:
74
+ raise ValueError(f"Invalid repo type, must be one of {REPO_TYPES}")
75
+ if revision is None:
76
+ revision = DEFAULT_REVISION
77
+
78
+ folder_path = Path(folder_path).expanduser().resolve()
79
+ if not folder_path.is_dir():
80
+ raise ValueError(f"Provided path: '{folder_path}' is not a directory")
81
+
82
+ if ignore_patterns is None:
83
+ ignore_patterns = []
84
+ elif isinstance(ignore_patterns, str):
85
+ ignore_patterns = [ignore_patterns]
86
+ ignore_patterns += DEFAULT_IGNORE_PATTERNS
87
+
88
+ if num_workers is None:
89
+ nb_cores = os.cpu_count() or 1
90
+ num_workers = max(nb_cores - 2, 2) # Use all but 2 cores, or at least 2 cores
91
+
92
+ # 2. Create repo if missing
93
+ repo_url = api.create_repo(repo_id=repo_id, repo_type=repo_type, private=private, exist_ok=True)
94
+ logger.info(f"Repo created: {repo_url}")
95
+ repo_id = repo_url.repo_id
96
+
97
+ # 3. List files to upload
98
+ filtered_paths_list = filter_repo_objects(
99
+ (path.relative_to(folder_path).as_posix() for path in folder_path.glob("**/*") if path.is_file()),
100
+ allow_patterns=allow_patterns,
101
+ ignore_patterns=ignore_patterns,
102
+ )
103
+ paths_list = [get_local_upload_paths(folder_path, relpath) for relpath in filtered_paths_list]
104
+ logger.info(f"Found {len(paths_list)} candidate files to upload")
105
+
106
+ # Read metadata for each file
107
+ items = [
108
+ (paths, read_upload_metadata(folder_path, paths.path_in_repo))
109
+ for paths in tqdm(paths_list, desc="Recovering from metadata files")
110
+ ]
111
+
112
+ # 4. Start workers
113
+ status = LargeUploadStatus(items)
114
+ threads = [
115
+ threading.Thread(
116
+ target=_worker_job,
117
+ kwargs={
118
+ "status": status,
119
+ "api": api,
120
+ "repo_id": repo_id,
121
+ "repo_type": repo_type,
122
+ "revision": revision,
123
+ },
124
+ )
125
+ for _ in range(num_workers)
126
+ ]
127
+
128
+ for thread in threads:
129
+ thread.start()
130
+
131
+ # 5. Print regular reports
132
+ if print_report:
133
+ print("\n\n" + status.current_report())
134
+ last_report_ts = time.time()
135
+ while True:
136
+ time.sleep(1)
137
+ if time.time() - last_report_ts >= print_report_every:
138
+ if print_report:
139
+ _print_overwrite(status.current_report())
140
+ last_report_ts = time.time()
141
+ if status.is_done():
142
+ logging.info("Is done: exiting main loop")
143
+ break
144
+
145
+ for thread in threads:
146
+ thread.join()
147
+
148
+ logger.info(status.current_report())
149
+ logging.info("Upload is complete!")
150
+
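
The function above is the engine behind the public `HfApi.upload_large_folder` method; a minimal call looks like this (repo id and folder path are placeholders):

```py
# Sketch of the public entry point; repo id and local path are placeholders.
from huggingface_hub import HfApi

api = HfApi()
api.upload_large_folder(
    repo_id="username/my-dataset",   # placeholder
    repo_type="dataset",             # explicitly required, as validated above
    folder_path="/data/my-dataset",  # placeholder
    num_workers=8,                   # defaults to max(cpu_count - 2, 2)
)
```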
151
+
152
+ ####################
153
+ # Logic to manage workers and synchronize tasks
154
+ ####################
155
+
156
+
157
+ class WorkerJob(enum.Enum):
158
+ SHA256 = enum.auto()
159
+ GET_UPLOAD_MODE = enum.auto()
160
+ PREUPLOAD_LFS = enum.auto()
161
+ COMMIT = enum.auto()
162
+ WAIT = enum.auto() # if no tasks are available but we don't want to exit
163
+
164
+
165
+ JOB_ITEM_T = Tuple[LocalUploadFilePaths, LocalUploadFileMetadata]
166
+
167
+
168
+ class LargeUploadStatus:
169
+ """Contains information, queues and tasks for a large upload process."""
170
+
171
+ def __init__(self, items: List[JOB_ITEM_T]):
172
+ self.items = items
173
+ self.queue_sha256: "queue.Queue[JOB_ITEM_T]" = queue.Queue()
174
+ self.queue_get_upload_mode: "queue.Queue[JOB_ITEM_T]" = queue.Queue()
175
+ self.queue_preupload_lfs: "queue.Queue[JOB_ITEM_T]" = queue.Queue()
176
+ self.queue_commit: "queue.Queue[JOB_ITEM_T]" = queue.Queue()
177
+ self.lock = Lock()
178
+
179
+ self.nb_workers_sha256: int = 0
180
+ self.nb_workers_get_upload_mode: int = 0
181
+ self.nb_workers_preupload_lfs: int = 0
182
+ self.nb_workers_commit: int = 0
183
+ self.nb_workers_waiting: int = 0
184
+ self.last_commit_attempt: Optional[float] = None
185
+
186
+ self._started_at = datetime.now()
187
+ self._chunk_idx: int = 1
188
+ self._chunk_lock: Lock = Lock()
189
+
190
+ # Setup queues
191
+ for item in self.items:
192
+ paths, metadata = item
193
+ if metadata.sha256 is None:
194
+ self.queue_sha256.put(item)
195
+ elif metadata.upload_mode is None:
196
+ self.queue_get_upload_mode.put(item)
197
+ elif metadata.upload_mode == "lfs" and not metadata.is_uploaded:
198
+ self.queue_preupload_lfs.put(item)
199
+ elif not metadata.is_committed:
200
+ self.queue_commit.put(item)
201
+ else:
202
+ logger.debug(f"Skipping file {paths.path_in_repo} (already uploaded and committed)")
203
+
204
+ def target_chunk(self) -> int:
205
+ with self._chunk_lock:
206
+ return COMMIT_SIZE_SCALE[self._chunk_idx]
207
+
208
+ def update_chunk(self, success: bool, nb_items: int, duration: float) -> None:
209
+ with self._chunk_lock:
210
+ if not success:
211
+ logger.warning(f"Failed to commit {nb_items} files at once. Will retry with less files in next batch.")
212
+ self._chunk_idx -= 1
213
+ elif nb_items >= COMMIT_SIZE_SCALE[self._chunk_idx] and duration < 40:
214
+ logger.info(f"Successfully committed {nb_items} at once. Increasing the limit for next batch.")
215
+ self._chunk_idx += 1
216
+
217
+ self._chunk_idx = max(0, min(self._chunk_idx, len(COMMIT_SIZE_SCALE) - 1))
218
+
219
+ def current_report(self) -> str:
220
+ """Generate a report of the current status of the large upload."""
221
+ nb_hashed = 0
222
+ size_hashed = 0
223
+ nb_preuploaded = 0
224
+ nb_lfs = 0
225
+ nb_lfs_unsure = 0
226
+ size_preuploaded = 0
227
+ nb_committed = 0
228
+ size_committed = 0
229
+ total_size = 0
230
+ ignored_files = 0
231
+ total_files = 0
232
+
233
+ with self.lock:
234
+ for _, metadata in self.items:
235
+ if metadata.should_ignore:
236
+ ignored_files += 1
237
+ continue
238
+ total_size += metadata.size
239
+ total_files += 1
240
+ if metadata.sha256 is not None:
241
+ nb_hashed += 1
242
+ size_hashed += metadata.size
243
+ if metadata.upload_mode == "lfs":
244
+ nb_lfs += 1
245
+ if metadata.upload_mode is None:
246
+ nb_lfs_unsure += 1
247
+ if metadata.is_uploaded:
248
+ nb_preuploaded += 1
249
+ size_preuploaded += metadata.size
250
+ if metadata.is_committed:
251
+ nb_committed += 1
252
+ size_committed += metadata.size
253
+ total_size_str = _format_size(total_size)
254
+
255
+ now = datetime.now()
256
+ now_str = now.strftime("%Y-%m-%d %H:%M:%S")
257
+ elapsed = now - self._started_at
258
+ elapsed_str = str(elapsed).split(".")[0] # remove milliseconds
259
+
260
+ message = "\n" + "-" * 10
261
+ message += f" {now_str} ({elapsed_str}) "
262
+ message += "-" * 10 + "\n"
263
+
264
+ message += "Files: "
265
+ message += f"hashed {nb_hashed}/{total_files} ({_format_size(size_hashed)}/{total_size_str}) | "
266
+ message += f"pre-uploaded: {nb_preuploaded}/{nb_lfs} ({_format_size(size_preuploaded)}/{total_size_str})"
267
+ if nb_lfs_unsure > 0:
268
+ message += f" (+{nb_lfs_unsure} unsure)"
269
+ message += f" | committed: {nb_committed}/{total_files} ({_format_size(size_committed)}/{total_size_str})"
270
+ message += f" | ignored: {ignored_files}\n"
271
+
272
+ message += "Workers: "
273
+ message += f"hashing: {self.nb_workers_sha256} | "
274
+ message += f"get upload mode: {self.nb_workers_get_upload_mode} | "
275
+ message += f"pre-uploading: {self.nb_workers_preupload_lfs} | "
276
+ message += f"committing: {self.nb_workers_commit} | "
277
+ message += f"waiting: {self.nb_workers_waiting}\n"
278
+ message += "-" * 51
279
+
280
+ return message
281
+
282
+ def is_done(self) -> bool:
283
+ with self.lock:
284
+ return all(metadata.is_committed or metadata.should_ignore for _, metadata in self.items)
285
+
286
+
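
The adaptive batching in `update_chunk` is easiest to see in isolation. The snippet below re-implements just that rule for illustration; it is not library code:

```py
# Standalone illustration of the update_chunk rule above (not library code).
COMMIT_SIZE_SCALE = [20, 50, 75, 100, 125, 200, 250, 400, 600, 1000]
idx = 1  # starts at 50 files per commit, as in LargeUploadStatus.__init__

def update(success: bool, nb_items: int, duration: float) -> int:
    global idx
    if not success:
        idx -= 1  # back off after a failed commit
    elif nb_items >= COMMIT_SIZE_SCALE[idx] and duration < 40:
        idx += 1  # a full, fast batch => allow bigger commits
    idx = max(0, min(idx, len(COMMIT_SIZE_SCALE) - 1))
    return COMMIT_SIZE_SCALE[idx]

print(update(True, 50, 12.0))   # 75: full batch committed quickly
print(update(False, 75, 30.0))  # 50: failure shrinks the next batch
```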
287
+ def _worker_job(
288
+ status: LargeUploadStatus,
289
+ api: "HfApi",
290
+ repo_id: str,
291
+ repo_type: str,
292
+ revision: str,
293
+ ):
294
+ """
295
+ Main process for a worker. The worker will perform tasks based on the priority list until all files are uploaded
296
+ and committed. If no tasks are available, the worker will wait for 10 seconds before checking again.
297
+
298
+ If a task fails for any reason, the item(s) are put back in the queue for another worker to pick up.
299
+
300
+ Read `upload_large_folder` docstring for more information on how tasks are prioritized.
301
+ """
302
+ while True:
303
+ next_job: Optional[Tuple[WorkerJob, List[JOB_ITEM_T]]] = None
304
+
305
+ # Determine next task
306
+ next_job = _determine_next_job(status)
307
+ if next_job is None:
308
+ return
309
+ job, items = next_job
310
+
311
+ # Perform task
312
+ if job == WorkerJob.SHA256:
313
+ item = items[0] # single item
314
+ try:
315
+ _compute_sha256(item)
316
+ status.queue_get_upload_mode.put(item)
317
+ except KeyboardInterrupt:
318
+ raise
319
+ except Exception as e:
320
+ logger.error(f"Failed to compute sha256: {e}")
321
+ logger.error(traceback.format_exc())
322
+ status.queue_sha256.put(item)
323
+
324
+ with status.lock:
325
+ status.nb_workers_sha256 -= 1
326
+
327
+ elif job == WorkerJob.GET_UPLOAD_MODE:
328
+ try:
329
+ _get_upload_mode(items, api=api, repo_id=repo_id, repo_type=repo_type, revision=revision)
330
+ except KeyboardInterrupt:
331
+ raise
332
+ except Exception as e:
333
+ logger.error(f"Failed to get upload mode: {e}")
334
+ logger.error(traceback.format_exc())
335
+
336
+ # Items are either:
337
+ # - dropped (if should_ignore)
338
+ # - put in LFS queue (if LFS)
339
+ # - put in commit queue (if regular)
340
+ # - or put back (if error occurred).
341
+ for item in items:
342
+ _, metadata = item
343
+ if metadata.should_ignore:
344
+ continue
345
+ if metadata.upload_mode == "lfs":
346
+ status.queue_preupload_lfs.put(item)
347
+ elif metadata.upload_mode == "regular":
348
+ status.queue_commit.put(item)
349
+ else:
350
+ status.queue_get_upload_mode.put(item)
351
+
352
+ with status.lock:
353
+ status.nb_workers_get_upload_mode -= 1
354
+
355
+ elif job == WorkerJob.PREUPLOAD_LFS:
356
+ item = items[0] # single item
357
+ try:
358
+ _preupload_lfs(item, api=api, repo_id=repo_id, repo_type=repo_type, revision=revision)
359
+ status.queue_commit.put(item)
360
+ except KeyboardInterrupt:
361
+ raise
362
+ except Exception as e:
363
+ logger.error(f"Failed to preupload LFS: {e}")
364
+ logger.error(traceback.format_exc())
365
+ status.queue_preupload_lfs.put(item)
366
+
367
+ with status.lock:
368
+ status.nb_workers_preupload_lfs -= 1
369
+
370
+ elif job == WorkerJob.COMMIT:
371
+ start_ts = time.time()
372
+ success = True
373
+ try:
374
+ _commit(items, api=api, repo_id=repo_id, repo_type=repo_type, revision=revision)
375
+ except KeyboardInterrupt:
376
+ raise
377
+ except Exception as e:
378
+ logger.error(f"Failed to commit: {e}")
379
+ logger.error(traceback.format_exc())
380
+ for item in items:
381
+ status.queue_commit.put(item)
382
+ success = False
383
+ duration = time.time() - start_ts
384
+ status.update_chunk(success, len(items), duration)
385
+ with status.lock:
386
+ status.last_commit_attempt = time.time()
387
+ status.nb_workers_commit -= 1
388
+
389
+ elif job == WorkerJob.WAIT:
390
+ time.sleep(WAITING_TIME_IF_NO_TASKS)
391
+ with status.lock:
392
+ status.nb_workers_waiting -= 1
393
+
394
+
395
+ def _determine_next_job(status: LargeUploadStatus) -> Optional[Tuple[WorkerJob, List[JOB_ITEM_T]]]:
396
+ with status.lock:
397
+ # 1. Commit if more than 5 minutes since last commit attempt (and at least 1 file)
398
+ if (
399
+ status.nb_workers_commit == 0
400
+ and status.queue_commit.qsize() > 0
401
+ and status.last_commit_attempt is not None
402
+ and time.time() - status.last_commit_attempt > 5 * 60
403
+ ):
404
+ status.nb_workers_commit += 1
405
+ logger.debug("Job: commit (more than 5 minutes since last commit attempt)")
406
+ return (WorkerJob.COMMIT, _get_n(status.queue_commit, status.target_chunk()))
407
+
408
+ # 2. Commit if at least 150 files are ready to commit
409
+ elif status.nb_workers_commit == 0 and status.queue_commit.qsize() >= 150:
410
+ status.nb_workers_commit += 1
411
+ logger.debug("Job: commit (>100 files ready)")
412
+ return (WorkerJob.COMMIT, _get_n(status.queue_commit, status.target_chunk()))
413
+
414
+ # 3. Get upload mode if at least 100 files
415
+ elif status.queue_get_upload_mode.qsize() >= MAX_NB_FILES_FETCH_UPLOAD_MODE:
416
+ status.nb_workers_get_upload_mode += 1
417
+ logger.debug(f"Job: get upload mode (>{MAX_NB_FILES_FETCH_UPLOAD_MODE} files ready)")
418
+ return (WorkerJob.GET_UPLOAD_MODE, _get_n(status.queue_get_upload_mode, MAX_NB_FILES_FETCH_UPLOAD_MODE))
419
+
420
+ # 4. Preupload LFS file if at least 1 file and no worker is preuploading LFS
421
+ elif status.queue_preupload_lfs.qsize() > 0 and status.nb_workers_preupload_lfs == 0:
422
+ status.nb_workers_preupload_lfs += 1
423
+ logger.debug("Job: preupload LFS (no other worker preuploading LFS)")
424
+ return (WorkerJob.PREUPLOAD_LFS, _get_one(status.queue_preupload_lfs))
425
+
426
+ # 5. Compute sha256 if at least 1 file and no worker is computing sha256
427
+ elif status.queue_sha256.qsize() > 0 and status.nb_workers_sha256 == 0:
428
+ status.nb_workers_sha256 += 1
429
+ logger.debug("Job: sha256 (no other worker computing sha256)")
430
+ return (WorkerJob.SHA256, _get_one(status.queue_sha256))
431
+
432
+ # 6. Get upload mode if at least 1 file and no worker is getting upload mode
433
+ elif status.queue_get_upload_mode.qsize() > 0 and status.nb_workers_get_upload_mode == 0:
434
+ status.nb_workers_get_upload_mode += 1
435
+ logger.debug("Job: get upload mode (no other worker getting upload mode)")
436
+ return (WorkerJob.GET_UPLOAD_MODE, _get_n(status.queue_get_upload_mode, MAX_NB_FILES_FETCH_UPLOAD_MODE))
437
+
438
+ # 7. Preupload LFS file if at least 1 file
439
+ # Skip if hf_transfer is enabled and there is already a worker preuploading LFS
440
+ elif status.queue_preupload_lfs.qsize() > 0 and (
441
+ status.nb_workers_preupload_lfs == 0 or not constants.HF_HUB_ENABLE_HF_TRANSFER
442
+ ):
443
+ status.nb_workers_preupload_lfs += 1
444
+ logger.debug("Job: preupload LFS")
445
+ return (WorkerJob.PREUPLOAD_LFS, _get_one(status.queue_preupload_lfs))
446
+
447
+ # 8. Compute sha256 if at least 1 file
448
+ elif status.queue_sha256.qsize() > 0:
449
+ status.nb_workers_sha256 += 1
450
+ logger.debug("Job: sha256")
451
+ return (WorkerJob.SHA256, _get_one(status.queue_sha256))
452
+
453
+ # 9. Get upload mode if at least 1 file
454
+ elif status.queue_get_upload_mode.qsize() > 0:
455
+ status.nb_workers_get_upload_mode += 1
456
+ logger.debug("Job: get upload mode")
457
+ return (WorkerJob.GET_UPLOAD_MODE, _get_n(status.queue_get_upload_mode, MAX_NB_FILES_FETCH_UPLOAD_MODE))
458
+
459
+ # 10. Commit if at least 1 file and 1 min since last commit attempt
460
+ elif (
461
+ status.nb_workers_commit == 0
462
+ and status.queue_commit.qsize() > 0
463
+ and status.last_commit_attempt is not None
464
+ and time.time() - status.last_commit_attempt > 1 * 60
465
+ ):
466
+ status.nb_workers_commit += 1
467
+ logger.debug("Job: commit (1 min since last commit attempt)")
468
+ return (WorkerJob.COMMIT, _get_n(status.queue_commit, status.target_chunk()))
469
+
470
+ # 11. Commit if at least 1 file is ready, all other queues are empty, and all workers are waiting
471
+ # e.g. when it's the last commit
472
+ elif (
473
+ status.nb_workers_commit == 0
474
+ and status.queue_commit.qsize() > 0
475
+ and status.queue_sha256.qsize() == 0
476
+ and status.queue_get_upload_mode.qsize() == 0
477
+ and status.queue_preupload_lfs.qsize() == 0
478
+ and status.nb_workers_sha256 == 0
479
+ and status.nb_workers_get_upload_mode == 0
480
+ and status.nb_workers_preupload_lfs == 0
481
+ ):
482
+ status.nb_workers_commit += 1
483
+ logger.debug("Job: commit")
484
+ return (WorkerJob.COMMIT, _get_n(status.queue_commit, status.target_chunk()))
485
+
486
+ # 12. If all queues are empty, exit
487
+ elif all(metadata.is_committed or metadata.should_ignore for _, metadata in status.items):
488
+ logger.info("All files have been processed! Exiting worker.")
489
+ return None
490
+
491
+ # 13. If no task is available, wait
492
+ else:
493
+ status.nb_workers_waiting += 1
494
+ logger.debug(f"No task available, waiting... ({WAITING_TIME_IF_NO_TASKS}s)")
495
+ return (WorkerJob.WAIT, [])
496
+
497
+
498
+ ####################
499
+ # Atomic jobs (sha256, get_upload_mode, preupload_lfs, commit)
500
+ ####################
501
+
502
+
503
+ def _compute_sha256(item: JOB_ITEM_T) -> None:
504
+ """Compute sha256 of a file and save it in metadata."""
505
+ paths, metadata = item
506
+ if metadata.sha256 is None:
507
+ with paths.file_path.open("rb") as f:
508
+ metadata.sha256 = sha_fileobj(f).hex()
509
+ metadata.save(paths)
510
+
511
+
512
+ def _get_upload_mode(items: List[JOB_ITEM_T], api: "HfApi", repo_id: str, repo_type: str, revision: str) -> None:
513
+ """Get upload mode for each file and update metadata.
514
+
515
+ Also receive info if the file should be ignored.
516
+ """
517
+ additions = [_build_hacky_operation(item) for item in items]
518
+ _fetch_upload_modes(
519
+ additions=additions,
520
+ repo_type=repo_type,
521
+ repo_id=repo_id,
522
+ headers=api._build_hf_headers(),
523
+ revision=quote(revision, safe=""),
524
+ endpoint=api.endpoint,
525
+ )
526
+ for item, addition in zip(items, additions):
527
+ paths, metadata = item
528
+ metadata.upload_mode = addition._upload_mode
529
+ metadata.should_ignore = addition._should_ignore
530
+ metadata.remote_oid = addition._remote_oid
531
+ metadata.save(paths)
532
+
533
+
534
+ def _preupload_lfs(item: JOB_ITEM_T, api: "HfApi", repo_id: str, repo_type: str, revision: str) -> None:
535
+ """Preupload LFS file and update metadata."""
536
+ paths, metadata = item
537
+ addition = _build_hacky_operation(item)
538
+ api.preupload_lfs_files(
539
+ repo_id=repo_id,
540
+ repo_type=repo_type,
541
+ revision=revision,
542
+ additions=[addition],
543
+ )
544
+
545
+ metadata.is_uploaded = True
546
+ metadata.save(paths)
547
+
548
+
549
+ def _commit(items: List[JOB_ITEM_T], api: "HfApi", repo_id: str, repo_type: str, revision: str) -> None:
550
+ """Commit files to the repo."""
551
+ additions = [_build_hacky_operation(item) for item in items]
552
+ api.create_commit(
553
+ repo_id=repo_id,
554
+ repo_type=repo_type,
555
+ revision=revision,
556
+ operations=additions,
557
+ commit_message="Add files using upload-large-folder tool",
558
+ )
559
+ for paths, metadata in items:
560
+ metadata.is_committed = True
561
+ metadata.save(paths)
562
+
563
+
564
+ ####################
565
+ # Hacks with CommitOperationAdd to bypass checks/sha256 calculation
566
+ ####################
567
+
568
+
569
+ class HackyCommitOperationAdd(CommitOperationAdd):
570
+ def __post_init__(self) -> None:
571
+ if isinstance(self.path_or_fileobj, Path):
572
+ self.path_or_fileobj = str(self.path_or_fileobj)
573
+
574
+
575
+ def _build_hacky_operation(item: JOB_ITEM_T) -> HackyCommitOperationAdd:
576
+ paths, metadata = item
577
+ operation = HackyCommitOperationAdd(path_in_repo=paths.path_in_repo, path_or_fileobj=paths.file_path)
578
+ with paths.file_path.open("rb") as file:
579
+ sample = file.peek(512)[:512]
580
+ if metadata.sha256 is None:
581
+ raise ValueError("sha256 must have been computed by now!")
582
+ operation.upload_info = UploadInfo(sha256=bytes.fromhex(metadata.sha256), size=metadata.size, sample=sample)
583
+ operation._upload_mode = metadata.upload_mode # type: ignore[assignment]
584
+ operation._should_ignore = metadata.should_ignore
585
+ operation._remote_oid = metadata.remote_oid
586
+ return operation
587
+
588
+
589
+ ####################
590
+ # Misc helpers
591
+ ####################
592
+
593
+
594
+ def _get_one(queue: "queue.Queue[JOB_ITEM_T]") -> List[JOB_ITEM_T]:
595
+ return [queue.get()]
596
+
597
+
598
+ def _get_n(queue: "queue.Queue[JOB_ITEM_T]", n: int) -> List[JOB_ITEM_T]:
599
+ return [queue.get() for _ in range(min(queue.qsize(), n))]
600
+
601
+
602
+ def _print_overwrite(report: str) -> None:
603
+ """Print a report, overwriting the previous lines.
604
+
605
+ Since tqdm is using `sys.stderr` to (re-)write progress bars, we need to use `sys.stdout`
606
+ to print the report.
607
+
608
+ Note: works well only if no other process is writing to `sys.stdout`!
609
+ """
610
+ report += "\n"
611
+ # Get terminal width
612
+ terminal_width = shutil.get_terminal_size().columns
613
+
614
+ # Count number of lines that should be cleared
615
+ nb_lines = sum(len(line) // terminal_width + 1 for line in report.splitlines())
616
+
617
+ # Clear previous lines based on the number of lines in the report
618
+ for _ in range(nb_lines):
619
+ sys.stdout.write("\r\033[K") # Clear line
620
+ sys.stdout.write("\033[F") # Move cursor up one line
621
+
622
+ # Print the new report, filling remaining space with whitespace
623
+ sys.stdout.write(report)
624
+ sys.stdout.write(" " * (terminal_width - len(report.splitlines()[-1])))
625
+ sys.stdout.flush()
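
The ANSI sequences used above (`"\r\033[K"` clears a line, `"\033[F"` moves the cursor up) can be tried in a toy loop; this sketch ignores line wrapping, which `_print_overwrite` handles via the terminal width:

```py
# Toy demo of the in-place reporting trick used by _print_overwrite above.
import sys
import time

previous_lines = 0
for i in range(5):
    report = f"step {i}\nprogress: {i * 25}%"
    for _ in range(previous_lines):
        sys.stdout.write("\r\033[K\033[F")  # clear line, move cursor up
    sys.stdout.write("\r\033[K" + report + "\n")
    sys.stdout.flush()
    previous_lines = report.count("\n") + 1
    time.sleep(0.2)
```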
.venv/lib/python3.13/site-packages/huggingface_hub/dataclasses.py ADDED
@@ -0,0 +1,481 @@
1
+ import inspect
2
+ from dataclasses import _MISSING_TYPE, MISSING, Field, field, fields
3
+ from functools import wraps
4
+ from typing import (
5
+ Any,
6
+ Callable,
7
+ Dict,
8
+ List,
9
+ Literal,
10
+ Optional,
11
+ Tuple,
12
+ Type,
13
+ TypeVar,
14
+ Union,
15
+ get_args,
16
+ get_origin,
17
+ overload,
18
+ )
19
+
20
+ from .errors import (
21
+ StrictDataclassClassValidationError,
22
+ StrictDataclassDefinitionError,
23
+ StrictDataclassFieldValidationError,
24
+ )
25
+
26
+
27
+ Validator_T = Callable[[Any], None]
28
+ T = TypeVar("T")
29
+
30
+
31
+ # The overload decorator helps type checkers understand the different return types
32
+ @overload
33
+ def strict(cls: Type[T]) -> Type[T]: ...
34
+
35
+
36
+ @overload
37
+ def strict(*, accept_kwargs: bool = False) -> Callable[[Type[T]], Type[T]]: ...
38
+
39
+
40
+ def strict(
41
+ cls: Optional[Type[T]] = None, *, accept_kwargs: bool = False
42
+ ) -> Union[Type[T], Callable[[Type[T]], Type[T]]]:
43
+ """
44
+ Decorator to add strict validation to a dataclass.
45
+
46
+ This decorator must be used on top of `@dataclass` to ensure IDEs and static typing tools
47
+ recognize the class as a dataclass.
48
+
49
+ Can be used with or without arguments:
50
+ - `@strict`
51
+ - `@strict(accept_kwargs=True)`
52
+
53
+ Args:
54
+ cls:
55
+ The class to convert to a strict dataclass.
56
+ accept_kwargs (`bool`, *optional*):
57
+ If True, allows arbitrary keyword arguments in `__init__`. Defaults to False.
58
+
59
+ Returns:
60
+ The enhanced dataclass with strict validation on field assignment.
61
+
62
+ Example:
63
+ ```py
64
+ >>> from dataclasses import dataclass
65
+ >>> from huggingface_hub.dataclasses import as_validated_field, strict, validated_field
66
+
67
+ >>> @as_validated_field
68
+ >>> def positive_int(value: int):
69
+ ... if not value >= 0:
70
+ ... raise ValueError(f"Value must be positive, got {value}")
71
+
72
+ >>> @strict(accept_kwargs=True)
73
+ ... @dataclass
74
+ ... class User:
75
+ ... name: str
76
+ ... age: int = positive_int(default=10)
77
+
78
+ # Initialize
79
+ >>> User(name="John")
80
+ User(name='John', age=10)
81
+
82
+ # Extra kwargs are accepted
83
+ >>> User(name="John", age=30, lastname="Doe")
84
+ User(name='John', age=30, *lastname='Doe')
85
+
86
+ # Invalid type => raises
87
+ >>> User(name="John", age="30")
88
+ huggingface_hub.errors.StrictDataclassFieldValidationError: Validation error for field 'age':
89
+ TypeError: Field 'age' expected int, got str (value: '30')
90
+
91
+ # Invalid value => raises
92
+ >>> User(name="John", age=-1)
93
+ huggingface_hub.errors.StrictDataclassFieldValidationError: Validation error for field 'age':
94
+ ValueError: Value must be positive, got -1
95
+ ```
96
+ """
97
+
98
+ def wrap(cls: Type[T]) -> Type[T]:
99
+ if not hasattr(cls, "__dataclass_fields__"):
100
+ raise StrictDataclassDefinitionError(
101
+ f"Class '{cls.__name__}' must be a dataclass before applying @strict."
102
+ )
103
+
104
+ # List and store validators
105
+ field_validators: Dict[str, List[Validator_T]] = {}
106
+ for f in fields(cls): # type: ignore [arg-type]
107
+ validators = []
108
+ validators.append(_create_type_validator(f))
109
+ custom_validator = f.metadata.get("validator")
110
+ if custom_validator is not None:
111
+ if not isinstance(custom_validator, list):
112
+ custom_validator = [custom_validator]
113
+ for validator in custom_validator:
114
+ if not _is_validator(validator):
115
+ raise StrictDataclassDefinitionError(
116
+ f"Invalid validator for field '{f.name}': {validator}. Must be a callable taking a single argument."
117
+ )
118
+ validators.extend(custom_validator)
119
+ field_validators[f.name] = validators
120
+ cls.__validators__ = field_validators # type: ignore
121
+
122
+ # Override __setattr__ to validate fields on assignment
123
+ original_setattr = cls.__setattr__
124
+
125
+ def __strict_setattr__(self: Any, name: str, value: Any) -> None:
126
+ """Custom __setattr__ method for strict dataclasses."""
127
+ # Run all validators
128
+ for validator in self.__validators__.get(name, []):
129
+ try:
130
+ validator(value)
131
+ except (ValueError, TypeError) as e:
132
+ raise StrictDataclassFieldValidationError(field=name, cause=e) from e
133
+
134
+ # If validation passed, set the attribute
135
+ original_setattr(self, name, value)
136
+
137
+ cls.__setattr__ = __strict_setattr__ # type: ignore[method-assign]
138
+
139
+ if accept_kwargs:
140
+ # (optional) Override __init__ to accept arbitrary keyword arguments
141
+ original_init = cls.__init__
142
+
143
+ @wraps(original_init)
144
+ def __init__(self, **kwargs: Any) -> None:
145
+ # Extract only the fields that are part of the dataclass
146
+ dataclass_fields = {f.name for f in fields(cls)} # type: ignore [arg-type]
147
+ standard_kwargs = {k: v for k, v in kwargs.items() if k in dataclass_fields}
148
+
149
+ # Call the original __init__ with standard fields
150
+ original_init(self, **standard_kwargs)
151
+
152
+ # Add any additional kwargs as attributes
153
+ for name, value in kwargs.items():
154
+ if name not in dataclass_fields:
155
+ self.__setattr__(name, value)
156
+
157
+ cls.__init__ = __init__ # type: ignore[method-assign]
158
+
159
+ # (optional) Override __repr__ to include additional kwargs
160
+ original_repr = cls.__repr__
161
+
162
+ @wraps(original_repr)
163
+ def __repr__(self) -> str:
164
+ # Call the original __repr__ to get the standard fields
165
+ standard_repr = original_repr(self)
166
+
167
+ # Get additional kwargs
168
+ additional_kwargs = [
169
+ # add a '*' in front of additional kwargs to let the user know they are not part of the dataclass
170
+ f"*{k}={v!r}"
171
+ for k, v in self.__dict__.items()
172
+ if k not in cls.__dataclass_fields__ # type: ignore [attr-defined]
173
+ ]
174
+ additional_repr = ", ".join(additional_kwargs)
175
+
176
+ # Combine both representations
177
+ return f"{standard_repr[:-1]}, {additional_repr})" if additional_kwargs else standard_repr
178
+
179
+ cls.__repr__ = __repr__ # type: ignore [method-assign]
180
+
181
+ # List all public methods starting with `validate_` => class validators.
182
+ class_validators = []
183
+
184
+ for name in dir(cls):
185
+ if not name.startswith("validate_"):
186
+ continue
187
+ method = getattr(cls, name)
188
+ if not callable(method):
189
+ continue
190
+ if len(inspect.signature(method).parameters) != 1:
191
+ raise StrictDataclassDefinitionError(
192
+ f"Class '{cls.__name__}' has a class validator '{name}' that takes more than one argument."
193
+ " Class validators must take only 'self' as an argument. Methods starting with 'validate_'"
194
+ " are considered to be class validators."
195
+ )
196
+ class_validators.append(method)
197
+
198
+ cls.__class_validators__ = class_validators # type: ignore [attr-defined]
199
+
200
+ # Add `validate` method to the class, but first check if it already exists
201
+ def validate(self: T) -> None:
202
+ """Run class validators on the instance."""
203
+ for validator in cls.__class_validators__: # type: ignore [attr-defined]
204
+ try:
205
+ validator(self)
206
+ except (ValueError, TypeError) as e:
207
+ raise StrictDataclassClassValidationError(validator=validator.__name__, cause=e) from e
208
+
209
+ # Hack: allow raising if `.validate()` already exists, except when it was created by this decorator on a parent class
210
+ # (in which case we just override it)
211
+ validate.__is_defined_by_strict_decorator__ = True # type: ignore [attr-defined]
212
+
213
+ if hasattr(cls, "validate"):
214
+ if not getattr(cls.validate, "__is_defined_by_strict_decorator__", False): # type: ignore [attr-defined]
215
+ raise StrictDataclassDefinitionError(
216
+ f"Class '{cls.__name__}' already implements a method called 'validate'."
217
+ " This method name is reserved when using the @strict decorator on a dataclass."
218
+ " If you want to keep your own method, please rename it."
219
+ )
220
+
221
+ cls.validate = validate # type: ignore
222
+
223
+ # Run class validators after initialization
224
+ initial_init = cls.__init__
225
+
226
+ @wraps(initial_init)
227
+ def init_with_validate(self, *args, **kwargs) -> None:
228
+ """Run class validators after initialization."""
229
+ initial_init(self, *args, **kwargs) # type: ignore [call-arg]
230
+ cls.validate(self) # type: ignore [attr-defined]
231
+
232
+ setattr(cls, "__init__", init_with_validate)
233
+
234
+ return cls
235
+
236
+ # Return wrapped class or the decorator itself
237
+ return wrap(cls) if cls is not None else wrap
238
+
239
+
240
+ def validated_field(
241
+ validator: Union[List[Validator_T], Validator_T],
242
+ default: Union[Any, _MISSING_TYPE] = MISSING,
243
+ default_factory: Union[Callable[[], Any], _MISSING_TYPE] = MISSING,
244
+ init: bool = True,
245
+ repr: bool = True,
246
+ hash: Optional[bool] = None,
247
+ compare: bool = True,
248
+ metadata: Optional[Dict] = None,
249
+ **kwargs: Any,
250
+ ) -> Any:
251
+ """
252
+ Create a dataclass field with a custom validator.
253
+
254
+ Useful to apply several checks to a field. If only applying one rule, check out the [`as_validated_field`] decorator.
255
+
256
+ Args:
257
+ validator (`Callable` or `List[Callable]`):
258
+ A method that takes a value as input and raises ValueError/TypeError if the value is invalid.
259
+ Can be a list of validators to apply multiple checks.
260
+ **kwargs:
261
+ Additional arguments to pass to `dataclasses.field()`.
262
+
263
+ Returns:
264
+ A field with the validator attached in metadata
265
+ """
266
+ if not isinstance(validator, list):
267
+ validator = [validator]
268
+ if metadata is None:
269
+ metadata = {}
270
+ metadata["validator"] = validator
271
+ return field( # type: ignore
272
+ default=default, # type: ignore [arg-type]
273
+ default_factory=default_factory, # type: ignore [arg-type]
274
+ init=init,
275
+ repr=repr,
276
+ hash=hash,
277
+ compare=compare,
278
+ metadata=metadata,
279
+ **kwargs,
280
+ )
281
+
282
+
283
+ def as_validated_field(validator: Validator_T):
284
+ """
285
+ Decorates a validator function as a [`validated_field`] (i.e. a dataclass field with a custom validator).
286
+
287
+ Args:
288
+ validator (`Callable`):
289
+ A method that takes a value as input and raises ValueError/TypeError if the value is invalid.
290
+ """
291
+
292
+ def _inner(
293
+ default: Union[Any, _MISSING_TYPE] = MISSING,
294
+ default_factory: Union[Callable[[], Any], _MISSING_TYPE] = MISSING,
295
+ init: bool = True,
296
+ repr: bool = True,
297
+ hash: Optional[bool] = None,
298
+ compare: bool = True,
299
+ metadata: Optional[Dict] = None,
300
+ **kwargs: Any,
301
+ ):
302
+ return validated_field(
303
+ validator,
304
+ default=default,
305
+ default_factory=default_factory,
306
+ init=init,
307
+ repr=repr,
308
+ hash=hash,
309
+ compare=compare,
310
+ metadata=metadata,
311
+ **kwargs,
312
+ )
313
+
314
+ return _inner
315
+
316
+
317
+ def type_validator(name: str, value: Any, expected_type: Any) -> None:
318
+ """Validate that 'value' matches 'expected_type'."""
319
+ origin = get_origin(expected_type)
320
+ args = get_args(expected_type)
321
+
322
+ if expected_type is Any:
323
+ return
324
+ elif validator := _BASIC_TYPE_VALIDATORS.get(origin):
325
+ validator(name, value, args)
326
+ elif isinstance(expected_type, type): # simple types
327
+ _validate_simple_type(name, value, expected_type)
328
+ else:
329
+ raise TypeError(f"Unsupported type for field '{name}': {expected_type}")
330
+
331
+
332
+ def _validate_union(name: str, value: Any, args: Tuple[Any, ...]) -> None:
333
+ """Validate that value matches one of the types in a Union."""
334
+ errors = []
335
+ for t in args:
336
+ try:
337
+ type_validator(name, value, t)
338
+ return # Valid if any type matches
339
+ except TypeError as e:
340
+ errors.append(str(e))
341
+
342
+ raise TypeError(
343
+ f"Field '{name}' with value {repr(value)} doesn't match any type in {args}. Errors: {'; '.join(errors)}"
344
+ )
345
+
346
+
347
+ def _validate_literal(name: str, value: Any, args: Tuple[Any, ...]) -> None:
348
+ """Validate Literal type."""
349
+ if value not in args:
350
+ raise TypeError(f"Field '{name}' expected one of {args}, got {value}")
351
+
352
+
353
+ def _validate_list(name: str, value: Any, args: Tuple[Any, ...]) -> None:
354
+ """Validate List[T] type."""
355
+ if not isinstance(value, list):
356
+ raise TypeError(f"Field '{name}' expected a list, got {type(value).__name__}")
357
+
358
+ # Validate each item in the list
359
+ item_type = args[0]
360
+ for i, item in enumerate(value):
361
+ try:
362
+ type_validator(f"{name}[{i}]", item, item_type)
363
+ except TypeError as e:
364
+ raise TypeError(f"Invalid item at index {i} in list '{name}'") from e
365
+
366
+
367
+ def _validate_dict(name: str, value: Any, args: Tuple[Any, ...]) -> None:
368
+ """Validate Dict[K, V] type."""
369
+ if not isinstance(value, dict):
370
+ raise TypeError(f"Field '{name}' expected a dict, got {type(value).__name__}")
371
+
372
+ # Validate keys and values
373
+ key_type, value_type = args
374
+ for k, v in value.items():
375
+ try:
376
+ type_validator(f"{name}.key", k, key_type)
377
+ type_validator(f"{name}[{k!r}]", v, value_type)
378
+ except TypeError as e:
379
+ raise TypeError(f"Invalid key or value in dict '{name}'") from e
380
+
381
+
382
+ def _validate_tuple(name: str, value: Any, args: Tuple[Any, ...]) -> None:
383
+ """Validate Tuple type."""
384
+ if not isinstance(value, tuple):
385
+ raise TypeError(f"Field '{name}' expected a tuple, got {type(value).__name__}")
386
+
387
+ # Handle variable-length tuples: Tuple[T, ...]
388
+ if len(args) == 2 and args[1] is Ellipsis:
389
+ for i, item in enumerate(value):
390
+ try:
391
+ type_validator(f"{name}[{i}]", item, args[0])
392
+ except TypeError as e:
393
+ raise TypeError(f"Invalid item at index {i} in tuple '{name}'") from e
394
+ # Handle fixed-length tuples: Tuple[T1, T2, ...]
395
+ elif len(args) != len(value):
396
+ raise TypeError(f"Field '{name}' expected a tuple of length {len(args)}, got {len(value)}")
397
+ else:
398
+ for i, (item, expected) in enumerate(zip(value, args)):
399
+ try:
400
+ type_validator(f"{name}[{i}]", item, expected)
401
+ except TypeError as e:
402
+ raise TypeError(f"Invalid item at index {i} in tuple '{name}'") from e
403
+
404
+
405
+ def _validate_set(name: str, value: Any, args: Tuple[Any, ...]) -> None:
406
+ """Validate Set[T] type."""
407
+ if not isinstance(value, set):
408
+ raise TypeError(f"Field '{name}' expected a set, got {type(value).__name__}")
409
+
410
+ # Validate each item in the set
411
+ item_type = args[0]
412
+ for item in value:
413
+ try:
414
+ type_validator(f"{name} item", item, item_type)
415
+ except TypeError as e:
416
+ raise TypeError(f"Invalid item in set '{name}'") from e
417
+
418
+
419
+ def _validate_simple_type(name: str, value: Any, expected_type: type) -> None:
420
+ """Validate simple type (int, str, etc.)."""
421
+ if not isinstance(value, expected_type):
422
+ raise TypeError(
423
+ f"Field '{name}' expected {expected_type.__name__}, got {type(value).__name__} (value: {repr(value)})"
424
+ )
425
+
426
+
427
+ def _create_type_validator(field: Field) -> Validator_T:
428
+ """Create a type validator function for a field."""
429
+ # Hacky: we cannot use a lambda here because of reference issues
430
+
431
+ def validator(value: Any) -> None:
432
+ type_validator(field.name, value, field.type)
433
+
434
+ return validator
435
+
436
+
437
+ def _is_validator(validator: Any) -> bool:
438
+ """Check if a function is a validator.
439
+
440
+ A validator is a Callable that can be called with a single positional argument.
441
+ The validator can have more arguments with default values.
442
+
443
+ Basically, returns True if `validator(value)` is possible.
444
+ """
445
+ if not callable(validator):
446
+ return False
447
+
448
+ signature = inspect.signature(validator)
449
+ parameters = list(signature.parameters.values())
450
+ if len(parameters) == 0:
451
+ return False
452
+ if parameters[0].kind not in (
453
+ inspect.Parameter.POSITIONAL_OR_KEYWORD,
454
+ inspect.Parameter.POSITIONAL_ONLY,
455
+ inspect.Parameter.VAR_POSITIONAL,
456
+ ):
457
+ return False
458
+ for parameter in parameters[1:]:
459
+ if parameter.default == inspect.Parameter.empty:
460
+ return False
461
+ return True
462
+
463
+
464
+ _BASIC_TYPE_VALIDATORS = {
465
+ Union: _validate_union,
466
+ Literal: _validate_literal,
467
+ list: _validate_list,
468
+ dict: _validate_dict,
469
+ tuple: _validate_tuple,
470
+ set: _validate_set,
471
+ }
472
+
473
+
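
A quick check of the recursive `type_validator` defined above (field names here are illustrative):

```py
# Illustration of type_validator on nested annotations (raises TypeError on mismatch).
from typing import Dict, List, Optional

from huggingface_hub.dataclasses import type_validator

type_validator("scores", {"a": [1, 2]}, Dict[str, List[int]])  # passes silently
type_validator("maybe", None, Optional[int])                   # passes: Union[int, None]

try:
    type_validator("scores", {"a": [1, "x"]}, Dict[str, List[int]])
except TypeError as e:
    print(e)  # Invalid key or value in dict 'scores'
```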
474
+ __all__ = [
475
+ "strict",
476
+ "validated_field",
477
+ "Validator_T",
478
+ "StrictDataclassClassValidationError",
479
+ "StrictDataclassDefinitionError",
480
+ "StrictDataclassFieldValidationError",
481
+ ]
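
Putting the module together, a minimal sketch with both a field validator and a `validate_*` class validator (the class and validator names are invented for illustration):

```py
# Sketch combining strict/validated_field with a `validate_*` class validator.
from dataclasses import dataclass

from huggingface_hub.dataclasses import strict, validated_field


def non_empty(value: str) -> None:
    if not value:
        raise ValueError("must not be empty")


@strict
@dataclass
class Interval:  # invented example class
    name: str = validated_field(non_empty, default="interval")
    low: int = 0
    high: int = 1

    def validate_bounds(self):  # run after __init__ and by .validate()
        if self.low > self.high:
            raise ValueError(f"low ({self.low}) must be <= high ({self.high})")


Interval(low=0, high=2)   # ok
# Interval(low=3, high=1) # raises StrictDataclassClassValidationError
# Interval(name="")       # raises StrictDataclassFieldValidationError
```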
.venv/lib/python3.13/site-packages/huggingface_hub/errors.py ADDED
@@ -0,0 +1,377 @@
1
+ """Contains all custom errors."""
2
+
3
+ from pathlib import Path
4
+ from typing import Optional, Union
5
+
6
+ from requests import HTTPError, Response
7
+
8
+
9
+ # CACHE ERRORS
10
+
11
+
12
+ class CacheNotFound(Exception):
13
+ """Exception thrown when the Huggingface cache is not found."""
14
+
15
+ cache_dir: Union[str, Path]
16
+
17
+ def __init__(self, msg: str, cache_dir: Union[str, Path], *args, **kwargs):
18
+ super().__init__(msg, *args, **kwargs)
19
+ self.cache_dir = cache_dir
20
+
21
+
22
+ class CorruptedCacheException(Exception):
23
+ """Exception for any unexpected structure in the Huggingface cache-system."""
24
+
25
+
26
+ # HEADERS ERRORS
27
+
28
+
29
+ class LocalTokenNotFoundError(EnvironmentError):
30
+ """Raised if local token is required but not found."""
31
+
32
+
33
+ # HTTP ERRORS
34
+
35
+
36
+ class OfflineModeIsEnabled(ConnectionError):
37
+ """Raised when a request is made but `HF_HUB_OFFLINE=1` is set as environment variable."""
38
+
39
+
40
+ class HfHubHTTPError(HTTPError):
41
+ """
42
+ HTTPError to inherit from for any custom HTTP Error raised in HF Hub.
43
+
44
+ Any HTTPError is converted at least into a `HfHubHTTPError`. If some information is
45
+ sent back by the server, it will be added to the error message.
46
+
47
+ Added details:
48
+ - Request id from the "X-Request-Id" header if it exists, otherwise from the "X-Amzn-Trace-Id" header.
49
+ - Server error message from the header "X-Error-Message".
50
+ - Server error message if we can find one in the response body.
51
+
52
+ Example:
53
+ ```py
54
+ import requests
55
+ from huggingface_hub.utils import get_session, hf_raise_for_status, HfHubHTTPError
56
+
57
+ response = get_session().post(...)
58
+ try:
59
+ hf_raise_for_status(response)
60
+ except HfHubHTTPError as e:
61
+ print(str(e)) # formatted message
62
+ e.request_id, e.server_message # details returned by server
63
+
64
+ # Complete the error message with additional information once it's raised
65
+ e.append_to_message("\n`create_commit` expects the repository to exist.")
66
+ raise
67
+ ```
68
+ """
69
+
70
+ def __init__(self, message: str, response: Optional[Response] = None, *, server_message: Optional[str] = None):
71
+ self.request_id = (
72
+ response.headers.get("x-request-id") or response.headers.get("X-Amzn-Trace-Id")
73
+ if response is not None
74
+ else None
75
+ )
76
+ self.server_message = server_message
77
+
78
+ super().__init__(
79
+ message,
80
+ response=response, # type: ignore [arg-type]
81
+ request=response.request if response is not None else None, # type: ignore [arg-type]
82
+ )
83
+
84
+ def append_to_message(self, additional_message: str) -> None:
85
+ """Append additional information to the `HfHubHTTPError` initial message."""
86
+ self.args = (self.args[0] + additional_message,) + self.args[1:]
87
+
88
+
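
Since `str(e)` on an `HTTPError` is derived from `e.args[0]`, the one-liner above is enough to enrich an already-raised error; a quick sketch:

```py
# Sketch: enriching an error message before re-raising (no Response attached here).
from huggingface_hub.errors import HfHubHTTPError

err = HfHubHTTPError("401 Client Error: Unauthorized")
err.append_to_message("\nMake sure you are authenticated.")
print(str(err))
# 401 Client Error: Unauthorized
# Make sure you are authenticated.
```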
89
+ # INFERENCE CLIENT ERRORS
90
+
91
+
92
+ class InferenceTimeoutError(HTTPError, TimeoutError):
93
+ """Error raised when a model is unavailable or the request times out."""
94
+
95
+
96
+ # INFERENCE ENDPOINT ERRORS
97
+
98
+
99
+ class InferenceEndpointError(Exception):
100
+ """Generic exception when dealing with Inference Endpoints."""
101
+
102
+
103
+ class InferenceEndpointTimeoutError(InferenceEndpointError, TimeoutError):
104
+ """Exception for timeouts while waiting for Inference Endpoint."""
105
+
106
+
107
+ # SAFETENSORS ERRORS
108
+
109
+
110
+ class SafetensorsParsingError(Exception):
111
+ """Raised when failing to parse a safetensors file metadata.
112
+
113
+ This can be the case if the file is not a safetensors file or does not respect the specification.
114
+ """
115
+
116
+
117
+ class NotASafetensorsRepoError(Exception):
118
+ """Raised when a repo is not a Safetensors repo i.e. doesn't have either a `model.safetensors` or a
119
+ `model.safetensors.index.json` file.
120
+ """
121
+
122
+
123
+ # TEXT GENERATION ERRORS
124
+
125
+
126
+ class TextGenerationError(HTTPError):
127
+ """Generic error raised if text-generation went wrong."""
128
+
129
+
130
+ # Text Generation Inference Errors
131
+ class ValidationError(TextGenerationError):
132
+ """Server-side validation error."""
133
+
134
+
135
+ class GenerationError(TextGenerationError):
136
+ pass
137
+
138
+
139
+ class OverloadedError(TextGenerationError):
140
+ pass
141
+
142
+
143
+ class IncompleteGenerationError(TextGenerationError):
144
+ pass
145
+
146
+
147
+ class UnknownError(TextGenerationError):
148
+ pass
149
+
150
+
151
+ # VALIDATION ERRORS
152
+
153
+
154
+ class HFValidationError(ValueError):
155
+ """Generic exception thrown by `huggingface_hub` validators.
156
+
157
+ Inherits from [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError).
158
+ """
159
+
160
+
161
+ # FILE METADATA ERRORS
162
+
163
+
164
+ class FileMetadataError(OSError):
165
+ """Error triggered when the metadata of a file on the Hub cannot be retrieved (missing ETag or commit_hash).
166
+
167
+ Inherits from `OSError` for backward compatibility.
168
+ """
169
+
170
+
171
+ # REPOSITORY ERRORS
172
+
173
+
174
+ class RepositoryNotFoundError(HfHubHTTPError):
175
+ """
176
+ Raised when trying to access a hf.co URL with an invalid repository name, or
177
+ with a private repo name the user does not have access to.
178
+
179
+ Example:
180
+
181
+ ```py
182
+ >>> from huggingface_hub import model_info
183
+ >>> model_info("<non_existent_repository>")
184
+ (...)
185
+ huggingface_hub.utils._errors.RepositoryNotFoundError: 401 Client Error. (Request ID: PvMw_VjBMjVdMz53WKIzP)
186
+
187
+ Repository Not Found for url: https://huggingface.co/api/models/%3Cnon_existent_repository%3E.
188
+ Please make sure you specified the correct `repo_id` and `repo_type`.
189
+ If the repo is private, make sure you are authenticated.
190
+ Invalid username or password.
191
+ ```
192
+ """
193
+
194
+
195
+ class GatedRepoError(RepositoryNotFoundError):
196
+ """
197
+ Raised when trying to access a gated repository for which the user is not on the
198
+ authorized list.
199
+
200
+ Note: derives from `RepositoryNotFoundError` to ensure backward compatibility.
201
+
202
+ Example:
203
+
204
+ ```py
205
+ >>> from huggingface_hub import model_info
206
+ >>> model_info("<gated_repository>")
207
+ (...)
208
+ huggingface_hub.utils._errors.GatedRepoError: 403 Client Error. (Request ID: ViT1Bf7O_026LGSQuVqfa)
209
+
210
+ Cannot access gated repo for url https://huggingface.co/api/models/ardent-figment/gated-model.
211
+ Access to model ardent-figment/gated-model is restricted and you are not in the authorized list.
212
+ Visit https://huggingface.co/ardent-figment/gated-model to ask for access.
213
+ ```
214
+ """
215
+
216
+
217
+ class DisabledRepoError(HfHubHTTPError):
218
+ """
219
+ Raised when trying to access a repository that has been disabled by its author.
220
+
221
+ Example:
222
+
223
+ ```py
224
+ >>> from huggingface_hub import dataset_info
225
+ >>> dataset_info("laion/laion-art")
226
+ (...)
227
+ huggingface_hub.utils._errors.DisabledRepoError: 403 Client Error. (Request ID: Root=1-659fc3fa-3031673e0f92c71a2260dbe2;bc6f4dfb-b30a-4862-af0a-5cfe827610d8)
228
+
229
+ Cannot access repository for url https://huggingface.co/api/datasets/laion/laion-art.
230
+ Access to this resource is disabled.
231
+ ```
232
+ """
233
+
234
+
235
+ # REVISION ERROR
236
+
237
+
238
+ class RevisionNotFoundError(HfHubHTTPError):
239
+ """
240
+ Raised when trying to access a hf.co URL with a valid repository but an invalid
241
+ revision.
242
+
243
+ Example:
244
+
245
+ ```py
246
+ >>> from huggingface_hub import hf_hub_download
247
+ >>> hf_hub_download('bert-base-cased', 'config.json', revision='<non-existent-revision>')
248
+ (...)
249
+ huggingface_hub.utils._errors.RevisionNotFoundError: 404 Client Error. (Request ID: Mwhe_c3Kt650GcdKEFomX)
250
+
251
+ Revision Not Found for url: https://huggingface.co/bert-base-cased/resolve/%3Cnon-existent-revision%3E/config.json.
252
+ ```
253
+ """
254
+
255
+
256
+ # ENTRY ERRORS
257
+ class EntryNotFoundError(HfHubHTTPError):
258
+ """
259
+ Raised when trying to access a hf.co URL with a valid repository and revision
260
+ but an invalid filename.
261
+
262
+ Example:
263
+
264
+ ```py
265
+ >>> from huggingface_hub import hf_hub_download
266
+ >>> hf_hub_download('bert-base-cased', '<non-existent-file>')
267
+ (...)
268
+ huggingface_hub.utils._errors.EntryNotFoundError: 404 Client Error. (Request ID: 53pNl6M0MxsnG5Sw8JA6x)
269
+
270
+ Entry Not Found for url: https://huggingface.co/bert-base-cased/resolve/main/%3Cnon-existent-file%3E.
271
+ ```
272
+ """
273
+
274
+
275
+ class LocalEntryNotFoundError(EntryNotFoundError, FileNotFoundError, ValueError):
276
+ """
277
+ Raised when trying to access a file or snapshot that is not on the disk when network is
278
+ disabled or unavailable (connection issue). The entry may exist on the Hub.
279
+
280
+ Note: `ValueError` type is to ensure backward compatibility.
281
+ Note: `LocalEntryNotFoundError` derives from `HTTPError` because of `EntryNotFoundError`
282
+ even when it is not a network issue.
283
+
284
+ Example:
285
+
286
+ ```py
287
+ >>> from huggingface_hub import hf_hub_download
288
+ >>> hf_hub_download('bert-base-cased', '<non-cached-file>', local_files_only=True)
289
+ (...)
290
+ huggingface_hub.utils._errors.LocalEntryNotFoundError: Cannot find the requested files in the disk cache and outgoing traffic has been disabled. To enable hf.co look-ups and downloads online, set 'local_files_only' to False.
291
+ ```
292
+ """
293
+
294
+ def __init__(self, message: str):
295
+ super().__init__(message, response=None)
296
+
297
+
298
+ # REQUEST ERROR
299
+ class BadRequestError(HfHubHTTPError, ValueError):
300
+ """
301
+ Raised by `hf_raise_for_status` when the server returns an HTTP 400 error.
302
+
303
+ Example:
304
+
305
+ ```py
306
+ >>> resp = requests.post("hf.co/api/check", ...)
307
+ >>> hf_raise_for_status(resp, endpoint_name="check")
308
+ huggingface_hub.utils._errors.BadRequestError: Bad request for check endpoint: {details} (Request ID: XXX)
309
+ ```
310
+ """
311
+
312
+
313
+ # DDUF file format ERROR
314
+
315
+
316
+ class DDUFError(Exception):
317
+ """Base exception for errors related to the DDUF format."""
318
+
319
+
320
+ class DDUFCorruptedFileError(DDUFError):
321
+ """Exception thrown when the DDUF file is corrupted."""
322
+
323
+
324
+ class DDUFExportError(DDUFError):
325
+ """Base exception for errors during DDUF export."""
326
+
327
+
328
+ class DDUFInvalidEntryNameError(DDUFExportError):
329
+ """Exception thrown when the entry name is invalid."""
330
+
331
+
332
+ # STRICT DATACLASSES ERRORS
333
+
334
+
335
+ class StrictDataclassError(Exception):
336
+ """Base exception for strict dataclasses."""
337
+
338
+
339
+ class StrictDataclassDefinitionError(StrictDataclassError):
340
+ """Exception thrown when a strict dataclass is defined incorrectly."""
341
+
342
+
343
+ class StrictDataclassFieldValidationError(StrictDataclassError):
344
+ """Exception thrown when a strict dataclass fails validation for a given field."""
345
+
346
+ def __init__(self, field: str, cause: Exception):
347
+ error_message = f"Validation error for field '{field}':"
348
+ error_message += f"\n {cause.__class__.__name__}: {cause}"
349
+ super().__init__(error_message)
350
+
351
+
352
+ class StrictDataclassClassValidationError(StrictDataclassError):
353
+ """Exception thrown when a strict dataclass fails validation on a class validator."""
354
+
355
+ def __init__(self, validator: str, cause: Exception):
356
+ error_message = f"Class validation error for validator '{validator}':"
357
+ error_message += f"\n {cause.__class__.__name__}: {cause}"
358
+ super().__init__(error_message)
359
+
360
+
361
+ # XET ERRORS
362
+
363
+
364
+ class XetError(Exception):
365
+ """Base exception for errors related to Xet Storage."""
366
+
367
+
368
+ class XetAuthorizationError(XetError):
369
+ """Exception thrown when the user does not have the right authorization to use Xet Storage."""
370
+
371
+
372
+ class XetRefreshTokenError(XetError):
373
+ """Exception thrown when the refresh token is invalid."""
374
+
375
+
376
+ class XetDownloadError(Exception):
377
+ """Exception thrown when the download from Xet Storage fails."""
.venv/lib/python3.13/site-packages/huggingface_hub/fastai_utils.py ADDED
@@ -0,0 +1,425 @@
1
+ import json
2
+ import os
3
+ from pathlib import Path
4
+ from pickle import DEFAULT_PROTOCOL, PicklingError
5
+ from typing import Any, Dict, List, Optional, Union
6
+
7
+ from packaging import version
8
+
9
+ from huggingface_hub import constants, snapshot_download
10
+ from huggingface_hub.hf_api import HfApi
11
+ from huggingface_hub.utils import (
12
+ SoftTemporaryDirectory,
13
+ get_fastai_version,
14
+ get_fastcore_version,
15
+ get_python_version,
16
+ )
17
+
18
+ from .utils import logging, validate_hf_hub_args
19
+ from .utils._runtime import _PY_VERSION # noqa: F401 # for backward compatibility...
20
+
21
+
22
+ logger = logging.get_logger(__name__)
23
+
24
+
25
+ def _check_fastai_fastcore_versions(
26
+ fastai_min_version: str = "2.4",
27
+ fastcore_min_version: str = "1.3.27",
28
+ ):
29
+ """
30
+ Checks that the installed fastai and fastcore versions are compatible for pickle serialization.
31
+
32
+ Args:
33
+ fastai_min_version (`str`, *optional*):
34
+ The minimum fastai version supported.
35
+ fastcore_min_version (`str`, *optional*):
36
+ The minimum fastcore version supported.
37
+
38
+ <Tip>
39
+ Raises the following error:
40
+
41
+ - [`ImportError`](https://docs.python.org/3/library/exceptions.html#ImportError)
42
+ if the fastai or fastcore libraries are not available or are of an invalid version.
43
+
44
+ </Tip>
45
+ """
46
+
47
+ if (get_fastcore_version() or get_fastai_version()) == "N/A":
48
+ raise ImportError(
49
+ f"fastai>={fastai_min_version} and fastcore>={fastcore_min_version} are"
50
+ f" required. Currently using fastai=={get_fastai_version()} and"
51
+ f" fastcore=={get_fastcore_version()}."
52
+ )
53
+
54
+ current_fastai_version = version.Version(get_fastai_version())
55
+ current_fastcore_version = version.Version(get_fastcore_version())
56
+
57
+ if current_fastai_version < version.Version(fastai_min_version):
58
+ raise ImportError(
59
+ "`push_to_hub_fastai` and `from_pretrained_fastai` require a"
60
+ f" fastai>={fastai_min_version} version, but you are using fastai version"
61
+ f" {get_fastai_version()} which is incompatible. Upgrade with `pip install"
62
+ " fastai==2.5.6`."
63
+ )
64
+
65
+ if current_fastcore_version < version.Version(fastcore_min_version):
66
+ raise ImportError(
67
+ "`push_to_hub_fastai` and `from_pretrained_fastai` require a"
68
+ f" fastcore>={fastcore_min_version} version, but you are using fastcore"
69
+ f" version {get_fastcore_version()} which is incompatible. Upgrade with"
70
+ " `pip install fastcore==1.3.27`."
71
+ )
72
+
73
+
74
+ def _check_fastai_fastcore_pyproject_versions(
75
+ storage_folder: str,
76
+ fastai_min_version: str = "2.4",
77
+ fastcore_min_version: str = "1.3.27",
78
+ ):
79
+ """
80
+ Checks that the `pyproject.toml` file in the directory `storage_folder` has fastai and fastcore versions
81
+ that are compatible with `from_pretrained_fastai` and `push_to_hub_fastai`. If `pyproject.toml` does not exist
82
+ or does not contain versions for fastai and fastcore, then it logs a warning.
83
+
84
+ Args:
85
+ storage_folder (`str`):
86
+ Folder to look for the `pyproject.toml` file.
87
+ fastai_min_version (`str`, *optional*):
88
+ The minimum fastai version supported.
89
+ fastcore_min_version (`str`, *optional*):
90
+ The minimum fastcore version supported.
91
+
92
+ <Tip>
93
+ Raises the following errors:
94
+
95
+ - [`ImportError`](https://docs.python.org/3/library/exceptions.html#ImportError)
96
+ if the `toml` module is not installed.
97
+ - [`ImportError`](https://docs.python.org/3/library/exceptions.html#ImportError)
98
+ if the `pyproject.toml` indicates a lower than minimum supported version of fastai or fastcore.
99
+
100
+ </Tip>
101
+ """
102
+
103
+ try:
104
+ import toml
105
+ except ModuleNotFoundError:
106
+ raise ImportError(
107
+ "`push_to_hub_fastai` and `from_pretrained_fastai` require the toml module."
108
+ " Install it with `pip install toml`."
109
+ )
110
+
111
+ # Checks that a `pyproject.toml`, with `build-system` and `requires` sections, exists in the repository. If so, get a list of required packages.
112
+ if not os.path.isfile(f"{storage_folder}/pyproject.toml"):
113
+ logger.warning(
114
+ "There is no `pyproject.toml` in the repository that contains the fastai"
115
+ " `Learner`. The `pyproject.toml` would allow us to verify that your fastai"
116
+ " and fastcore versions are compatible with those of the model you want to"
117
+ " load."
118
+ )
119
+ return
120
+ pyproject_toml = toml.load(f"{storage_folder}/pyproject.toml")
121
+
122
+ if "build-system" not in pyproject_toml.keys():
123
+ logger.warning(
124
+ "There is no `build-system` section in the pyproject.toml of the repository"
125
+ " that contains the fastai `Learner`. The `build-system` would allow us to"
126
+ " verify that your fastai and fastcore versions are compatible with those"
127
+ " of the model you want to load."
128
+ )
129
+ return
130
+ build_system_toml = pyproject_toml["build-system"]
131
+
132
+ if "requires" not in build_system_toml.keys():
133
+ logger.warning(
134
+ "There is no `requires` section in the pyproject.toml of the repository"
135
+ " that contains the fastai `Learner`. The `requires` would allow us to"
136
+ " verify that your fastai and fastcore versions are compatible with those"
137
+ " of the model you want to load."
138
+ )
139
+ return
140
+ package_versions = build_system_toml["requires"]
141
+
142
+ # Extract the fastai and fastcore versions from `pyproject.toml` if available.
143
+ # If a package is specified without a version (e.g. "fastai" instead of "fastai=2.4"), the highest available version is assumed.
144
+ fastai_packages = [pck for pck in package_versions if pck.startswith("fastai")]
145
+ if len(fastai_packages) == 0:
146
+ logger.warning("The repository does not have a fastai version specified in the `pyproject.toml`.")
147
+ # fastai_version is an empty string if not specified
148
+ else:
149
+ fastai_version = str(fastai_packages[0]).partition("=")[2]
150
+ if fastai_version != "" and version.Version(fastai_version) < version.Version(fastai_min_version):
151
+ raise ImportError(
152
+ "`from_pretrained_fastai` requires"
153
+ f" fastai>={fastai_min_version} version but the model to load uses"
154
+ f" {fastai_version} which is incompatible."
155
+ )
156
+
157
+ fastcore_packages = [pck for pck in package_versions if pck.startswith("fastcore")]
158
+ if len(fastcore_packages) == 0:
159
+ logger.warning("The repository does not have a fastcore version specified in the `pyproject.toml`.")
160
+ # fastcore_version is an empty string if not specified
161
+ else:
162
+ fastcore_version = str(fastcore_packages[0]).partition("=")[2]
163
+ if fastcore_version != "" and version.Version(fastcore_version) < version.Version(fastcore_min_version):
164
+ raise ImportError(
165
+ "`from_pretrained_fastai` requires"
166
+ f" fastcore>={fastcore_min_version} version, but you are using fastcore"
167
+ f" version {fastcore_version} which is incompatible."
168
+ )
169
+
170
+
171
+ README_TEMPLATE = """---
172
+ tags:
173
+ - fastai
174
+ ---
175
+
176
+ # Amazing!
177
+
178
+ 🥳 Congratulations on hosting your fastai model on the Hugging Face Hub!
179
+
180
+ # Some next steps
181
+ 1. Fill out this model card with more information (see the template below and the [documentation here](https://huggingface.co/docs/hub/model-repos))!
182
+
183
+ 2. Create a demo in Gradio or Streamlit using 🤗 Spaces ([documentation here](https://huggingface.co/docs/hub/spaces)).
184
+
185
+ 3. Join the fastai community on the [Fastai Discord](https://discord.com/invite/YKrxeNn)!
186
+
187
+ Greetings fellow fastlearner 🤝! Don't forget to delete this content from your model card.
188
+
189
+
190
+ ---
191
+
192
+
193
+ # Model card
194
+
195
+ ## Model description
196
+ More information needed
197
+
198
+ ## Intended uses & limitations
199
+ More information needed
200
+
201
+ ## Training and evaluation data
202
+ More information needed
203
+ """
204
+
205
+ PYPROJECT_TEMPLATE = f"""[build-system]
206
+ requires = ["setuptools>=40.8.0", "wheel", "python={get_python_version()}", "fastai={get_fastai_version()}", "fastcore={get_fastcore_version()}"]
207
+ build-backend = "setuptools.build_meta:__legacy__"
208
+ """
209
+
210
+
211
+ def _create_model_card(repo_dir: Path):
212
+ """
213
+ Creates a model card for the repository.
214
+
215
+ Args:
216
+ repo_dir (`Path`):
217
+ Directory where model card is created.
218
+ """
219
+ readme_path = repo_dir / "README.md"
220
+
221
+ if not readme_path.exists():
222
+ with readme_path.open("w", encoding="utf-8") as f:
223
+ f.write(README_TEMPLATE)
224
+
225
+
226
+ def _create_model_pyproject(repo_dir: Path):
227
+ """
228
+ Creates a `pyproject.toml` for the repository.
229
+
230
+ Args:
231
+ repo_dir (`Path`):
232
+ Directory where `pyproject.toml` is created.
233
+ """
234
+ pyproject_path = repo_dir / "pyproject.toml"
235
+
236
+ if not pyproject_path.exists():
237
+ with pyproject_path.open("w", encoding="utf-8") as f:
238
+ f.write(PYPROJECT_TEMPLATE)
239
+
240
+
241
+ def _save_pretrained_fastai(
242
+ learner,
243
+ save_directory: Union[str, Path],
244
+ config: Optional[Dict[str, Any]] = None,
245
+ ):
246
+ """
247
+ Saves a fastai learner to `save_directory` in pickle format using the default pickle protocol for the version of python used.
248
+
249
+ Args:
250
+ learner (`Learner`):
251
+ The `fastai.Learner` you'd like to save.
252
+ save_directory (`str` or `Path`):
253
+ Specific directory in which you want to save the fastai learner.
254
+ config (`dict`, *optional*):
255
+ Configuration object. Will be uploaded as a .json file. Example: 'https://huggingface.co/espejelomar/fastai-pet-breeds-classification/blob/main/config.json'.
256
+
257
+ <Tip>
258
+
259
+ Raises the following error:
260
+
261
+ - [`RuntimeError`](https://docs.python.org/3/library/exceptions.html#RuntimeError)
262
+ if the config file provided is not a dictionary.
263
+
264
+ </Tip>
265
+ """
266
+ _check_fastai_fastcore_versions()
267
+
268
+ os.makedirs(save_directory, exist_ok=True)
269
+
270
+ # if the user provides config then we update it with the fastai and fastcore versions in CONFIG_TEMPLATE.
271
+ if config is not None:
272
+ if not isinstance(config, dict):
273
+ raise RuntimeError(f"Provided config should be a dict. Got: '{type(config)}'")
274
+ path = os.path.join(save_directory, constants.CONFIG_NAME)
275
+ with open(path, "w") as f:
276
+ json.dump(config, f)
277
+
278
+ _create_model_card(Path(save_directory))
279
+ _create_model_pyproject(Path(save_directory))
280
+
281
+ # learner.export saves the model in `self.path`.
282
+ learner.path = Path(save_directory)
283
+ os.makedirs(save_directory, exist_ok=True)
284
+ try:
285
+ learner.export(
286
+ fname="model.pkl",
287
+ pickle_protocol=DEFAULT_PROTOCOL,
288
+ )
289
+ except PicklingError:
290
+ raise PicklingError(
291
+ "You are using a lambda function, i.e., an anonymous function. `pickle`"
292
+ " cannot pickle function objects and requires that all functions have"
293
+ " names. One possible solution is to name the function."
294
+ )
295
+
296
+
297
+ @validate_hf_hub_args
298
+ def from_pretrained_fastai(
299
+ repo_id: str,
300
+ revision: Optional[str] = None,
301
+ ):
302
+ """
303
+ Load pretrained fastai model from the Hub or from a local directory.
304
+
305
+ Args:
306
+ repo_id (`str`):
307
+ The location where the pickled fastai.Learner is. It can be either of the two:
308
+ - Hosted on the Hugging Face Hub. E.g.: 'espejelomar/fastai-pet-breeds-classification' or 'distilgpt2'.
309
+ You can add a `revision` by appending `@` at the end of `repo_id`. E.g.: `dbmdz/bert-base-german-cased@main`.
310
+ Revision is the specific model version to use. Since we use a git-based system for storing models and other
311
+ artifacts on the Hugging Face Hub, it can be a branch name, a tag name, or a commit id.
312
+ - Hosted locally. `repo_id` would be a directory containing the pickle and a pyproject.toml
313
+ indicating the fastai and fastcore versions used to build the `fastai.Learner`. E.g.: `./my_model_directory/`.
314
+ revision (`str`, *optional*):
315
+ Revision at which the repo's files are downloaded. See documentation of `snapshot_download`.
316
+
317
+ Returns:
318
+ The `fastai.Learner` model in the `repo_id` repo.
319
+ """
320
+ _check_fastai_fastcore_versions()
321
+
322
+ # Load the `repo_id` repo.
323
+ # `snapshot_download` returns the folder where the model was stored.
324
+ # `cache_dir` will be the default '/root/.cache/huggingface/hub'
325
+ if not os.path.isdir(repo_id):
326
+ storage_folder = snapshot_download(
327
+ repo_id=repo_id,
328
+ revision=revision,
329
+ library_name="fastai",
330
+ library_version=get_fastai_version(),
331
+ )
332
+ else:
333
+ storage_folder = repo_id
334
+
335
+ _check_fastai_fastcore_pyproject_versions(storage_folder)
336
+
337
+ from fastai.learner import load_learner # type: ignore
338
+
339
+ return load_learner(os.path.join(storage_folder, "model.pkl"))
340
+
341
+
342
+ @validate_hf_hub_args
343
+ def push_to_hub_fastai(
344
+ learner,
345
+ *,
346
+ repo_id: str,
347
+ commit_message: str = "Push FastAI model using huggingface_hub.",
348
+ private: Optional[bool] = None,
349
+ token: Optional[str] = None,
350
+ config: Optional[dict] = None,
351
+ branch: Optional[str] = None,
352
+ create_pr: Optional[bool] = None,
353
+ allow_patterns: Optional[Union[List[str], str]] = None,
354
+ ignore_patterns: Optional[Union[List[str], str]] = None,
355
+ delete_patterns: Optional[Union[List[str], str]] = None,
356
+ api_endpoint: Optional[str] = None,
357
+ ):
358
+ """
359
+ Upload learner checkpoint files to the Hub.
360
+
361
+ Use `allow_patterns` and `ignore_patterns` to precisely filter which files should be pushed to the hub. Use
362
+ `delete_patterns` to delete existing remote files in the same commit. See [`upload_folder`] reference for more
363
+ details.
364
+
365
+ Args:
366
+ learner (`Learner`):
367
+ The `fastai.Learner` you'd like to push to the Hub.
368
+ repo_id (`str`):
369
+ The repository id for your model on the Hub, in the format "namespace/repo_name". The namespace can be your individual account or an organization to which you have write access (for example, 'stanfordnlp/stanza-de').
370
+ commit_message (`str`, *optional*):
371
+ Message to commit while pushing. Defaults to `"Push FastAI model using huggingface_hub."`.
372
+ private (`bool`, *optional*):
373
+ Whether or not the repository created should be private.
374
+ If `None` (default), the repo will be public unless the organization's default is private.
375
+ token (`str`, *optional*):
376
+ The Hugging Face account token to use as HTTP bearer authorization for remote files. If `None`, the token will be requested via a prompt.
377
+ config (`dict`, *optional*):
378
+ Configuration object to be saved alongside the model weights.
379
+ branch (`str`, *optional*):
380
+ The git branch on which to push the model. This defaults to
381
+ the default branch as specified in your repository, which
382
+ defaults to `"main"`.
383
+ create_pr (`boolean`, *optional*):
384
+ Whether or not to create a Pull Request from `branch` with that commit.
385
+ Defaults to `False`.
386
+ api_endpoint (`str`, *optional*):
387
+ The API endpoint to use when pushing the model to the hub.
388
+ allow_patterns (`List[str]` or `str`, *optional*):
389
+ If provided, only files matching at least one pattern are pushed.
390
+ ignore_patterns (`List[str]` or `str`, *optional*):
391
+ If provided, files matching any of the patterns are not pushed.
392
+ delete_patterns (`List[str]` or `str`, *optional*):
393
+ If provided, remote files matching any of the patterns will be deleted from the repo.
394
+
395
+ Returns:
396
+ The url of the commit of your model in the given repository.
397
+
398
+ <Tip>
399
+
400
+ Raises the following error:
401
+
402
+ - [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
403
+ if the user is not logged in to the Hugging Face Hub.
404
+
405
+ </Tip>
406
+ """
407
+ _check_fastai_fastcore_versions()
408
+ api = HfApi(endpoint=api_endpoint)
409
+ repo_id = api.create_repo(repo_id=repo_id, token=token, private=private, exist_ok=True).repo_id
410
+
411
+ # Push the files to the repo in a single commit
412
+ with SoftTemporaryDirectory() as tmp:
413
+ saved_path = Path(tmp) / repo_id
414
+ _save_pretrained_fastai(learner, saved_path, config=config)
415
+ return api.upload_folder(
416
+ repo_id=repo_id,
417
+ token=token,
418
+ folder_path=saved_path,
419
+ commit_message=commit_message,
420
+ revision=branch,
421
+ create_pr=create_pr,
422
+ allow_patterns=allow_patterns,
423
+ ignore_patterns=ignore_patterns,
424
+ delete_patterns=delete_patterns,
425
+ )
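Taken together, the helpers in this file form a push/load round trip. A minimal sketch, assuming fastai>=2.4 and fastcore>=1.3.27 are installed (the version checks above enforce this), that you are logged in, and that `learner` is a trained `fastai.Learner` built elsewhere; the repo id is hypothetical:

```python
# Hypothetical round trip for fastai_utils: push a Learner, then reload it.
from huggingface_hub import from_pretrained_fastai, push_to_hub_fastai

# `learner` is assumed to be a trained fastai.Learner created elsewhere,
# e.g. via fastai.vision.all.vision_learner(...); training is out of scope.
push_to_hub_fastai(learner, repo_id="user/my-fastai-model")

# Later, possibly on another machine: snapshot_download the repo, check the
# pyproject.toml versions, and unpickle model.pkl with fastai's load_learner.
reloaded_learner = from_pretrained_fastai("user/my-fastai-model")
```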
.venv/lib/python3.13/site-packages/huggingface_hub/hf_api.py ADDED
The diff for this file is too large to render. See raw diff
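Although the `hf_api.py` diff is not rendered, `hf_file_system.py` below delegates most of its work to `HfApi`. A minimal sketch of that client, using a public model as an example:

```python
# HfApi is the typed client that HfFileSystem wraps (repo_info,
# list_repo_tree, get_paths_info, create_commit, ...).
from huggingface_hub import HfApi

api = HfApi()  # defaults to https://huggingface.co and the locally saved token
info = api.model_info("bert-base-cased")  # public model, used as an example
print(info.sha)  # commit hash of the resolved default revision
```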
 
.venv/lib/python3.13/site-packages/huggingface_hub/hf_file_system.py ADDED
@@ -0,0 +1,1142 @@
1
+ import os
2
+ import re
3
+ import tempfile
4
+ from collections import deque
5
+ from dataclasses import dataclass, field
6
+ from datetime import datetime
7
+ from itertools import chain
8
+ from pathlib import Path
9
+ from typing import Any, Dict, Iterator, List, NoReturn, Optional, Tuple, Union
10
+ from urllib.parse import quote, unquote
11
+
12
+ import fsspec
13
+ from fsspec.callbacks import _DEFAULT_CALLBACK, NoOpCallback, TqdmCallback
14
+ from fsspec.utils import isfilelike
15
+ from requests import Response
16
+
17
+ from . import constants
18
+ from ._commit_api import CommitOperationCopy, CommitOperationDelete
19
+ from .errors import EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError
20
+ from .file_download import hf_hub_url, http_get
21
+ from .hf_api import HfApi, LastCommitInfo, RepoFile
22
+ from .utils import HFValidationError, hf_raise_for_status, http_backoff
23
+
24
+
25
+ # Regex used to match special revisions with "/" in them (see #1710)
26
+ SPECIAL_REFS_REVISION_REGEX = re.compile(
27
+ r"""
28
+ (^refs\/convert\/\w+) # `refs/convert/parquet` revisions
29
+ |
30
+ (^refs\/pr\/\d+) # PR revisions
31
+ """,
32
+ re.VERBOSE,
33
+ )
34
+
35
+
36
+ @dataclass
37
+ class HfFileSystemResolvedPath:
38
+ """Data structure containing information about a resolved Hugging Face file system path."""
39
+
40
+ repo_type: str
41
+ repo_id: str
42
+ revision: str
43
+ path_in_repo: str
44
+ # The part placed after '@' in the initial path. It can be a quoted or unquoted refs revision.
45
+ # Used to reconstruct the unresolved path to return to the user.
46
+ _raw_revision: Optional[str] = field(default=None, repr=False)
47
+
48
+ def unresolve(self) -> str:
49
+ repo_path = constants.REPO_TYPES_URL_PREFIXES.get(self.repo_type, "") + self.repo_id
50
+ if self._raw_revision:
51
+ return f"{repo_path}@{self._raw_revision}/{self.path_in_repo}".rstrip("/")
52
+ elif self.revision != constants.DEFAULT_REVISION:
53
+ return f"{repo_path}@{safe_revision(self.revision)}/{self.path_in_repo}".rstrip("/")
54
+ else:
55
+ return f"{repo_path}/{self.path_in_repo}".rstrip("/")
56
+
57
+
58
+ class HfFileSystem(fsspec.AbstractFileSystem):
59
+ """
60
+ Access a remote Hugging Face Hub repository as if it were a local file system.
61
+
62
+ <Tip warning={true}>
63
+
64
+ [`HfFileSystem`] provides fsspec compatibility, which is useful for libraries that require it (e.g., reading
65
+ Hugging Face datasets directly with `pandas`). However, it introduces additional overhead due to this compatibility
66
+ layer. For better performance and reliability, it's recommended to use `HfApi` methods when possible.
67
+
68
+ </Tip>
69
+
70
+ Args:
71
+ token (`str` or `bool`, *optional*):
72
+ A valid user access token (string). Defaults to the locally saved
73
+ token, which is the recommended method for authentication (see
74
+ https://huggingface.co/docs/huggingface_hub/quick-start#authentication).
75
+ To disable authentication, pass `False`.
76
+ endpoint (`str`, *optional*):
77
+ Endpoint of the Hub. Defaults to <https://huggingface.co>.
78
+ Usage:
79
+
80
+ ```python
81
+ >>> from huggingface_hub import HfFileSystem
82
+
83
+ >>> fs = HfFileSystem()
84
+
85
+ >>> # List files
86
+ >>> fs.glob("my-username/my-model/*.bin")
87
+ ['my-username/my-model/pytorch_model.bin']
88
+ >>> fs.ls("datasets/my-username/my-dataset", detail=False)
89
+ ['datasets/my-username/my-dataset/.gitattributes', 'datasets/my-username/my-dataset/README.md', 'datasets/my-username/my-dataset/data.json']
90
+
91
+ >>> # Read/write files
92
+ >>> with fs.open("my-username/my-model/pytorch_model.bin") as f:
93
+ ... data = f.read()
94
+ >>> with fs.open("my-username/my-model/pytorch_model.bin", "wb") as f:
95
+ ... f.write(data)
96
+ ```
97
+ """
98
+
99
+ root_marker = ""
100
+ protocol = "hf"
101
+
102
+ def __init__(
103
+ self,
104
+ *args,
105
+ endpoint: Optional[str] = None,
106
+ token: Union[bool, str, None] = None,
107
+ **storage_options,
108
+ ):
109
+ super().__init__(*args, **storage_options)
110
+ self.endpoint = endpoint or constants.ENDPOINT
111
+ self.token = token
112
+ self._api = HfApi(endpoint=endpoint, token=token)
113
+ # Maps (repo_type, repo_id, revision) to a 2-tuple with:
114
+ # * the 1st element indicating whether the repository and the revision exist
115
+ # * the 2nd element being the exception raised if the repository or revision doesn't exist
116
+ self._repo_and_revision_exists_cache: Dict[
117
+ Tuple[str, str, Optional[str]], Tuple[bool, Optional[Exception]]
118
+ ] = {}
119
+
120
+ def _repo_and_revision_exist(
121
+ self, repo_type: str, repo_id: str, revision: Optional[str]
122
+ ) -> Tuple[bool, Optional[Exception]]:
123
+ if (repo_type, repo_id, revision) not in self._repo_and_revision_exists_cache:
124
+ try:
125
+ self._api.repo_info(
126
+ repo_id, revision=revision, repo_type=repo_type, timeout=constants.HF_HUB_ETAG_TIMEOUT
127
+ )
128
+ except (RepositoryNotFoundError, HFValidationError) as e:
129
+ self._repo_and_revision_exists_cache[(repo_type, repo_id, revision)] = False, e
130
+ self._repo_and_revision_exists_cache[(repo_type, repo_id, None)] = False, e
131
+ except RevisionNotFoundError as e:
132
+ self._repo_and_revision_exists_cache[(repo_type, repo_id, revision)] = False, e
133
+ self._repo_and_revision_exists_cache[(repo_type, repo_id, None)] = True, None
134
+ else:
135
+ self._repo_and_revision_exists_cache[(repo_type, repo_id, revision)] = True, None
136
+ self._repo_and_revision_exists_cache[(repo_type, repo_id, None)] = True, None
137
+ return self._repo_and_revision_exists_cache[(repo_type, repo_id, revision)]
138
+
139
+ def resolve_path(self, path: str, revision: Optional[str] = None) -> HfFileSystemResolvedPath:
140
+ """
141
+ Resolve a Hugging Face file system path into its components.
142
+
143
+ Args:
144
+ path (`str`):
145
+ Path to resolve.
146
+ revision (`str`, *optional*):
147
+ The revision of the repo to resolve. Defaults to the revision specified in the path.
148
+
149
+ Returns:
150
+ [`HfFileSystemResolvedPath`]: Resolved path information containing `repo_type`, `repo_id`, `revision` and `path_in_repo`.
151
+
152
+ Raises:
153
+ `ValueError`:
154
+ If path contains conflicting revision information.
155
+ `NotImplementedError`:
156
+ If trying to list repositories.
157
+ """
158
+
159
+ def _align_revision_in_path_with_revision(
160
+ revision_in_path: Optional[str], revision: Optional[str]
161
+ ) -> Optional[str]:
162
+ if revision is not None:
163
+ if revision_in_path is not None and revision_in_path != revision:
164
+ raise ValueError(
165
+ f'Revision specified in path ("{revision_in_path}") and in `revision` argument ("{revision}")'
166
+ " are not the same."
167
+ )
168
+ else:
169
+ revision = revision_in_path
170
+ return revision
171
+
172
+ path = self._strip_protocol(path)
173
+ if not path:
174
+ # can't list repositories at root
175
+ raise NotImplementedError("Access to repositories lists is not implemented.")
176
+ elif path.split("/")[0] + "/" in constants.REPO_TYPES_URL_PREFIXES.values():
177
+ if "/" not in path:
178
+ # can't list repositories at the repository type level
179
+ raise NotImplementedError("Access to repositories lists is not implemented.")
180
+ repo_type, path = path.split("/", 1)
181
+ repo_type = constants.REPO_TYPES_MAPPING[repo_type]
182
+ else:
183
+ repo_type = constants.REPO_TYPE_MODEL
184
+ if path.count("/") > 0:
185
+ if "@" in path:
186
+ repo_id, revision_in_path = path.split("@", 1)
187
+ if "/" in revision_in_path:
188
+ match = SPECIAL_REFS_REVISION_REGEX.search(revision_in_path)
189
+ if match is not None and revision in (None, match.group()):
190
+ # Handle `refs/convert/parquet` and PR revisions separately
191
+ path_in_repo = SPECIAL_REFS_REVISION_REGEX.sub("", revision_in_path).lstrip("/")
192
+ revision_in_path = match.group()
193
+ else:
194
+ revision_in_path, path_in_repo = revision_in_path.split("/", 1)
195
+ else:
196
+ path_in_repo = ""
197
+ revision = _align_revision_in_path_with_revision(unquote(revision_in_path), revision)
198
+ repo_and_revision_exist, err = self._repo_and_revision_exist(repo_type, repo_id, revision)
199
+ if not repo_and_revision_exist:
200
+ _raise_file_not_found(path, err)
201
+ else:
202
+ revision_in_path = None
203
+ repo_id_with_namespace = "/".join(path.split("/")[:2])
204
+ path_in_repo_with_namespace = "/".join(path.split("/")[2:])
205
+ repo_id_without_namespace = path.split("/")[0]
206
+ path_in_repo_without_namespace = "/".join(path.split("/")[1:])
207
+ repo_id = repo_id_with_namespace
208
+ path_in_repo = path_in_repo_with_namespace
209
+ repo_and_revision_exist, err = self._repo_and_revision_exist(repo_type, repo_id, revision)
210
+ if not repo_and_revision_exist:
211
+ if isinstance(err, (RepositoryNotFoundError, HFValidationError)):
212
+ repo_id = repo_id_without_namespace
213
+ path_in_repo = path_in_repo_without_namespace
214
+ repo_and_revision_exist, _ = self._repo_and_revision_exist(repo_type, repo_id, revision)
215
+ if not repo_and_revision_exist:
216
+ _raise_file_not_found(path, err)
217
+ else:
218
+ _raise_file_not_found(path, err)
219
+ else:
220
+ repo_id = path
221
+ path_in_repo = ""
222
+ if "@" in path:
223
+ repo_id, revision_in_path = path.split("@", 1)
224
+ revision = _align_revision_in_path_with_revision(unquote(revision_in_path), revision)
225
+ else:
226
+ revision_in_path = None
227
+ repo_and_revision_exist, _ = self._repo_and_revision_exist(repo_type, repo_id, revision)
228
+ if not repo_and_revision_exist:
229
+ raise NotImplementedError("Access to repositories lists is not implemented.")
230
+
231
+ revision = revision if revision is not None else constants.DEFAULT_REVISION
232
+ return HfFileSystemResolvedPath(repo_type, repo_id, revision, path_in_repo, _raw_revision=revision_in_path)
233
+
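As an aside, a sketch of what the `resolve_path` method above yields for two hypothetical inputs (resolution validates repos against the Hub, so these assume the repos exist):

```python
# Hypothetical resolutions; the repos must exist, since resolve_path calls
# _repo_and_revision_exist under the hood.
from huggingface_hub import HfFileSystem

fs = HfFileSystem()
p = fs.resolve_path("datasets/user/my-dataset@refs%2Fpr%2F1/data.csv")
# -> repo_type="dataset", repo_id="user/my-dataset",
#    revision="refs/pr/1" (percent-encoding unquoted), path_in_repo="data.csv"
p = fs.resolve_path("user/my-model/weights.bin")
# -> repo_type="model", repo_id="user/my-model",
#    revision="main" (constants.DEFAULT_REVISION), path_in_repo="weights.bin"
```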
234
+ def invalidate_cache(self, path: Optional[str] = None) -> None:
235
+ """
236
+ Clear the cache for a given path.
237
+
238
+ For more details, refer to [fsspec documentation](https://filesystem-spec.readthedocs.io/en/latest/api.html#fsspec.spec.AbstractFileSystem.invalidate_cache).
239
+
240
+ Args:
241
+ path (`str`, *optional*):
242
+ Path to clear from cache. If not provided, clear the entire cache.
243
+
244
+ """
245
+ if not path:
246
+ self.dircache.clear()
247
+ self._repo_and_revision_exists_cache.clear()
248
+ else:
249
+ resolved_path = self.resolve_path(path)
250
+ path = resolved_path.unresolve()
251
+ while path:
252
+ self.dircache.pop(path, None)
253
+ path = self._parent(path)
254
+
255
+ # Only clear repo cache if path is to repo root
256
+ if not resolved_path.path_in_repo:
257
+ self._repo_and_revision_exists_cache.pop((resolved_path.repo_type, resolved_path.repo_id, None), None)
258
+ self._repo_and_revision_exists_cache.pop(
259
+ (resolved_path.repo_type, resolved_path.repo_id, resolved_path.revision), None
260
+ )
261
+
262
+ def _open(
263
+ self,
264
+ path: str,
265
+ mode: str = "rb",
266
+ revision: Optional[str] = None,
267
+ block_size: Optional[int] = None,
268
+ **kwargs,
269
+ ) -> "HfFileSystemFile":
270
+ if "a" in mode:
271
+ raise NotImplementedError("Appending to remote files is not yet supported.")
272
+ if block_size == 0:
273
+ return HfFileSystemStreamFile(self, path, mode=mode, revision=revision, block_size=block_size, **kwargs)
274
+ else:
275
+ return HfFileSystemFile(self, path, mode=mode, revision=revision, block_size=block_size, **kwargs)
276
+
277
+ def _rm(self, path: str, revision: Optional[str] = None, **kwargs) -> None:
278
+ resolved_path = self.resolve_path(path, revision=revision)
279
+ self._api.delete_file(
280
+ path_in_repo=resolved_path.path_in_repo,
281
+ repo_id=resolved_path.repo_id,
282
+ token=self.token,
283
+ repo_type=resolved_path.repo_type,
284
+ revision=resolved_path.revision,
285
+ commit_message=kwargs.get("commit_message"),
286
+ commit_description=kwargs.get("commit_description"),
287
+ )
288
+ self.invalidate_cache(path=resolved_path.unresolve())
289
+
290
+ def rm(
291
+ self,
292
+ path: str,
293
+ recursive: bool = False,
294
+ maxdepth: Optional[int] = None,
295
+ revision: Optional[str] = None,
296
+ **kwargs,
297
+ ) -> None:
298
+ """
299
+ Delete files from a repository.
300
+
301
+ For more details, refer to [fsspec documentation](https://filesystem-spec.readthedocs.io/en/latest/api.html#fsspec.spec.AbstractFileSystem.rm).
302
+
303
+ <Tip warning={true}>
304
+
305
+ Note: When possible, use `HfApi.delete_file()` for better performance.
306
+
307
+ </Tip>
308
+
309
+ Args:
310
+ path (`str`):
311
+ Path to delete.
312
+ recursive (`bool`, *optional*):
313
+ If True, delete directory and all its contents. Defaults to False.
314
+ maxdepth (`int`, *optional*):
315
+ Maximum number of subdirectories to visit when deleting recursively.
316
+ revision (`str`, *optional*):
317
+ The git revision to delete from.
318
+
319
+ """
320
+ resolved_path = self.resolve_path(path, revision=revision)
321
+ paths = self.expand_path(path, recursive=recursive, maxdepth=maxdepth, revision=revision)
322
+ paths_in_repo = [self.resolve_path(path).path_in_repo for path in paths if not self.isdir(path)]
323
+ operations = [CommitOperationDelete(path_in_repo=path_in_repo) for path_in_repo in paths_in_repo]
324
+ commit_message = f"Delete {path} "
325
+ commit_message += "recursively " if recursive else ""
326
+ commit_message += f"up to depth {maxdepth} " if maxdepth is not None else ""
327
+ # TODO: use `commit_description` to list all the deleted paths?
328
+ self._api.create_commit(
329
+ repo_id=resolved_path.repo_id,
330
+ repo_type=resolved_path.repo_type,
331
+ token=self.token,
332
+ operations=operations,
333
+ revision=resolved_path.revision,
334
+ commit_message=kwargs.get("commit_message", commit_message),
335
+ commit_description=kwargs.get("commit_description"),
336
+ )
337
+ self.invalidate_cache(path=resolved_path.unresolve())
338
+
339
+ def ls(
340
+ self, path: str, detail: bool = True, refresh: bool = False, revision: Optional[str] = None, **kwargs
341
+ ) -> List[Union[str, Dict[str, Any]]]:
342
+ """
343
+ List the contents of a directory.
344
+
345
+ For more details, refer to [fsspec documentation](https://filesystem-spec.readthedocs.io/en/latest/api.html#fsspec.spec.AbstractFileSystem.ls).
346
+
347
+ <Tip warning={true}>
348
+
349
+ Note: When possible, use `HfApi.list_repo_tree()` for better performance.
350
+
351
+ </Tip>
352
+
353
+ Args:
354
+ path (`str`):
355
+ Path to the directory.
356
+ detail (`bool`, *optional*):
357
+ If True, returns a list of dictionaries containing file information. If False,
358
+ returns a list of file paths. Defaults to True.
359
+ refresh (`bool`, *optional*):
360
+ If True, bypass the cache and fetch the latest data. Defaults to False.
361
+ revision (`str`, *optional*):
362
+ The git revision to list from.
363
+
364
+ Returns:
365
+ `List[Union[str, Dict[str, Any]]]`: List of file paths (if detail=False) or list of file information
366
+ dictionaries (if detail=True).
367
+ """
368
+ resolved_path = self.resolve_path(path, revision=revision)
369
+ path = resolved_path.unresolve()
370
+ kwargs = {"expand_info": detail, **kwargs}
371
+ try:
372
+ out = self._ls_tree(path, refresh=refresh, revision=revision, **kwargs)
373
+ except EntryNotFoundError:
374
+ # Path could be a file
375
+ if not resolved_path.path_in_repo:
376
+ _raise_file_not_found(path, None)
377
+ out = self._ls_tree(self._parent(path), refresh=refresh, revision=revision, **kwargs)
378
+ out = [o for o in out if o["name"] == path]
379
+ if len(out) == 0:
380
+ _raise_file_not_found(path, None)
381
+ return out if detail else [o["name"] for o in out]
382
+
383
+ def _ls_tree(
384
+ self,
385
+ path: str,
386
+ recursive: bool = False,
387
+ refresh: bool = False,
388
+ revision: Optional[str] = None,
389
+ expand_info: bool = True,
390
+ ):
391
+ resolved_path = self.resolve_path(path, revision=revision)
392
+ path = resolved_path.unresolve()
393
+ root_path = HfFileSystemResolvedPath(
394
+ resolved_path.repo_type,
395
+ resolved_path.repo_id,
396
+ resolved_path.revision,
397
+ path_in_repo="",
398
+ _raw_revision=resolved_path._raw_revision,
399
+ ).unresolve()
400
+
401
+ out = []
402
+ if path in self.dircache and not refresh:
403
+ cached_path_infos = self.dircache[path]
404
+ out.extend(cached_path_infos)
405
+ dirs_not_in_dircache = []
406
+ if recursive:
407
+ # Use BFS to traverse the cache and build the "recursive" output
408
+ # (The Hub uses a so-called "tree first" strategy for the tree endpoint but we sort the output to follow the spec so the result is (eventually) the same)
409
+ dirs_to_visit = deque(
410
+ [path_info for path_info in cached_path_infos if path_info["type"] == "directory"]
411
+ )
412
+ while dirs_to_visit:
413
+ dir_info = dirs_to_visit.popleft()
414
+ if dir_info["name"] not in self.dircache:
415
+ dirs_not_in_dircache.append(dir_info["name"])
416
+ else:
417
+ cached_path_infos = self.dircache[dir_info["name"]]
418
+ out.extend(cached_path_infos)
419
+ dirs_to_visit.extend(
420
+ [path_info for path_info in cached_path_infos if path_info["type"] == "directory"]
421
+ )
422
+
423
+ dirs_not_expanded = []
424
+ if expand_info:
425
+ # Check if there are directories with non-expanded entries
426
+ dirs_not_expanded = [self._parent(o["name"]) for o in out if o["last_commit"] is None]
427
+
428
+ if (recursive and dirs_not_in_dircache) or (expand_info and dirs_not_expanded):
429
+ # If the dircache is incomplete, find the common path of the missing and non-expanded entries
430
+ # and extend the output with the result of `_ls_tree(common_path, recursive=True)`
431
+ common_prefix = os.path.commonprefix(dirs_not_in_dircache + dirs_not_expanded)
432
+ # Get the parent directory if the common prefix itself is not a directory
433
+ common_path = (
434
+ common_prefix.rstrip("/")
435
+ if common_prefix.endswith("/")
436
+ or common_prefix == root_path
437
+ or common_prefix in chain(dirs_not_in_dircache, dirs_not_expanded)
438
+ else self._parent(common_prefix)
439
+ )
440
+ out = [o for o in out if not o["name"].startswith(common_path + "/")]
441
+ for cached_path in self.dircache:
442
+ if cached_path.startswith(common_path + "/"):
443
+ self.dircache.pop(cached_path, None)
444
+ self.dircache.pop(common_path, None)
445
+ out.extend(
446
+ self._ls_tree(
447
+ common_path,
448
+ recursive=recursive,
449
+ refresh=True,
450
+ revision=revision,
451
+ expand_info=expand_info,
452
+ )
453
+ )
454
+ else:
455
+ tree = self._api.list_repo_tree(
456
+ resolved_path.repo_id,
457
+ resolved_path.path_in_repo,
458
+ recursive=recursive,
459
+ expand=expand_info,
460
+ revision=resolved_path.revision,
461
+ repo_type=resolved_path.repo_type,
462
+ )
463
+ for path_info in tree:
464
+ if isinstance(path_info, RepoFile):
465
+ cache_path_info = {
466
+ "name": root_path + "/" + path_info.path,
467
+ "size": path_info.size,
468
+ "type": "file",
469
+ "blob_id": path_info.blob_id,
470
+ "lfs": path_info.lfs,
471
+ "last_commit": path_info.last_commit,
472
+ "security": path_info.security,
473
+ }
474
+ else:
475
+ cache_path_info = {
476
+ "name": root_path + "/" + path_info.path,
477
+ "size": 0,
478
+ "type": "directory",
479
+ "tree_id": path_info.tree_id,
480
+ "last_commit": path_info.last_commit,
481
+ }
482
+ parent_path = self._parent(cache_path_info["name"])
483
+ self.dircache.setdefault(parent_path, []).append(cache_path_info)
484
+ out.append(cache_path_info)
485
+ return out
486
+
487
+ def walk(self, path: str, *args, **kwargs) -> Iterator[Tuple[str, List[str], List[str]]]:
488
+ """
489
+ Return all files below the given path.
490
+
491
+ For more details, refer to [fsspec documentation](https://filesystem-spec.readthedocs.io/en/latest/api.html#fsspec.spec.AbstractFileSystem.walk).
492
+
493
+ Args:
494
+ path (`str`):
495
+ Root path to list files from.
496
+
497
+ Returns:
498
+ `Iterator[Tuple[str, List[str], List[str]]]`: An iterator of (path, list of directory names, list of file names) tuples.
499
+ """
500
+ # Set expand_info=False by default to get a 10x speed boost
501
+ kwargs = {"expand_info": kwargs.get("detail", False), **kwargs}
502
+ path = self.resolve_path(path, revision=kwargs.get("revision")).unresolve()
503
+ yield from super().walk(path, *args, **kwargs)
504
+
505
+ def glob(self, path: str, **kwargs) -> List[str]:
506
+ """
507
+ Find files by glob-matching.
508
+
509
+ For more details, refer to [fsspec documentation](https://filesystem-spec.readthedocs.io/en/latest/api.html#fsspec.spec.AbstractFileSystem.glob).
510
+
511
+ Args:
512
+ path (`str`):
513
+ Path pattern to match.
514
+
515
+ Returns:
516
+ `List[str]`: List of paths matching the pattern.
517
+ """
518
+ # Set expand_info=False by default to get a 10x speed boost
519
+ kwargs = {"expand_info": kwargs.get("detail", False), **kwargs}
520
+ path = self.resolve_path(path, revision=kwargs.get("revision")).unresolve()
521
+ return super().glob(path, **kwargs)
522
+
523
+ def find(
524
+ self,
525
+ path: str,
526
+ maxdepth: Optional[int] = None,
527
+ withdirs: bool = False,
528
+ detail: bool = False,
529
+ refresh: bool = False,
530
+ revision: Optional[str] = None,
531
+ **kwargs,
532
+ ) -> Union[List[str], Dict[str, Dict[str, Any]]]:
533
+ """
534
+ List all files below path.
535
+
536
+ For more details, refer to [fsspec documentation](https://filesystem-spec.readthedocs.io/en/latest/api.html#fsspec.spec.AbstractFileSystem.find).
537
+
538
+ Args:
539
+ path (`str`):
540
+ Root path to list files from.
541
+ maxdepth (`int`, *optional*):
542
+ Maximum depth to descend into subdirectories.
543
+ withdirs (`bool`, *optional*):
544
+ Include directory paths in the output. Defaults to False.
545
+ detail (`bool`, *optional*):
546
+ If True, returns a dict mapping paths to file information. Defaults to False.
547
+ refresh (`bool`, *optional*):
548
+ If True, bypass the cache and fetch the latest data. Defaults to False.
549
+ revision (`str`, *optional*):
550
+ The git revision to list from.
551
+
552
+ Returns:
553
+ `Union[List[str], Dict[str, Dict[str, Any]]]`: List of paths or dict of file information.
554
+ """
555
+ if maxdepth:
556
+ return super().find(
557
+ path, maxdepth=maxdepth, withdirs=withdirs, detail=detail, refresh=refresh, revision=revision, **kwargs
558
+ )
559
+ resolved_path = self.resolve_path(path, revision=revision)
560
+ path = resolved_path.unresolve()
561
+ kwargs = {"expand_info": detail, **kwargs}
562
+ try:
563
+ out = self._ls_tree(path, recursive=True, refresh=refresh, revision=resolved_path.revision, **kwargs)
564
+ except EntryNotFoundError:
565
+ # Path could be a file
566
+ if self.info(path, revision=revision, **kwargs)["type"] == "file":
567
+ out = {path: {}}
568
+ else:
569
+ out = {}
570
+ else:
571
+ if not withdirs:
572
+ out = [o for o in out if o["type"] != "directory"]
573
+ else:
574
+ # If `withdirs=True`, include the directory itself to be consistent with the spec
575
+ path_info = self.info(path, revision=resolved_path.revision, **kwargs)
576
+ out = [path_info] + out if path_info["type"] == "directory" else out
577
+ out = {o["name"]: o for o in out}
578
+ names = sorted(out)
579
+ if not detail:
580
+ return names
581
+ else:
582
+ return {name: out[name] for name in names}
583
+
584
+ def cp_file(self, path1: str, path2: str, revision: Optional[str] = None, **kwargs) -> None:
585
+ """
586
+ Copy a file within or between repositories.
587
+
588
+ <Tip warning={true}>
589
+
590
+ Note: When possible, use `HfApi.upload_file()` for better performance.
591
+
592
+ </Tip>
593
+
594
+ Args:
595
+ path1 (`str`):
596
+ Source path to copy from.
597
+ path2 (`str`):
598
+ Destination path to copy to.
599
+ revision (`str`, *optional*):
600
+ The git revision to copy from.
601
+
602
+ """
603
+ resolved_path1 = self.resolve_path(path1, revision=revision)
604
+ resolved_path2 = self.resolve_path(path2, revision=revision)
605
+
606
+ same_repo = (
607
+ resolved_path1.repo_type == resolved_path2.repo_type and resolved_path1.repo_id == resolved_path2.repo_id
608
+ )
609
+
610
+ if same_repo:
611
+ commit_message = f"Copy {path1} to {path2}"
612
+ self._api.create_commit(
613
+ repo_id=resolved_path1.repo_id,
614
+ repo_type=resolved_path1.repo_type,
615
+ revision=resolved_path2.revision,
616
+ commit_message=kwargs.get("commit_message", commit_message),
617
+ commit_description=kwargs.get("commit_description", ""),
618
+ operations=[
619
+ CommitOperationCopy(
620
+ src_path_in_repo=resolved_path1.path_in_repo,
621
+ path_in_repo=resolved_path2.path_in_repo,
622
+ src_revision=resolved_path1.revision,
623
+ )
624
+ ],
625
+ )
626
+ else:
627
+ with self.open(path1, "rb", revision=resolved_path1.revision) as f:
628
+ content = f.read()
629
+ commit_message = f"Copy {path1} to {path2}"
630
+ self._api.upload_file(
631
+ path_or_fileobj=content,
632
+ path_in_repo=resolved_path2.path_in_repo,
633
+ repo_id=resolved_path2.repo_id,
634
+ token=self.token,
635
+ repo_type=resolved_path2.repo_type,
636
+ revision=resolved_path2.revision,
637
+ commit_message=kwargs.get("commit_message", commit_message),
638
+ commit_description=kwargs.get("commit_description"),
639
+ )
640
+ self.invalidate_cache(path=resolved_path1.unresolve())
641
+ self.invalidate_cache(path=resolved_path2.unresolve())
642
+
643
+ def modified(self, path: str, **kwargs) -> datetime:
644
+ """
645
+ Get the last modified time of a file.
646
+
647
+ For more details, refer to [fsspec documentation](https://filesystem-spec.readthedocs.io/en/latest/api.html#fsspec.spec.AbstractFileSystem.modified).
648
+
649
+ Args:
650
+ path (`str`):
651
+ Path to the file.
652
+
653
+ Returns:
654
+ `datetime`: Last commit date of the file.
655
+ """
656
+ info = self.info(path, **kwargs)
657
+ return info["last_commit"]["date"]
658
+
659
+ def info(self, path: str, refresh: bool = False, revision: Optional[str] = None, **kwargs) -> Dict[str, Any]:
660
+ """
661
+ Get information about a file or directory.
662
+
663
+ For more details, refer to [fsspec documentation](https://filesystem-spec.readthedocs.io/en/latest/api.html#fsspec.spec.AbstractFileSystem.info).
664
+
665
+ <Tip warning={true}>
666
+
667
+ Note: When possible, use `HfApi.get_paths_info()` or `HfApi.repo_info()` for better performance.
668
+
669
+ </Tip>
670
+
671
+ Args:
672
+ path (`str`):
673
+ Path to get info for.
674
+ refresh (`bool`, *optional*):
675
+ If True, bypass the cache and fetch the latest data. Defaults to False.
676
+ revision (`str`, *optional*):
677
+ The git revision to get info from.
678
+
679
+ Returns:
680
+ `Dict[str, Any]`: Dictionary containing file information (type, size, commit info, etc.).
681
+
682
+ """
683
+ resolved_path = self.resolve_path(path, revision=revision)
684
+ path = resolved_path.unresolve()
685
+ expand_info = kwargs.get(
686
+ "expand_info", True
687
+ ) # don't expose it as a parameter in the public API to follow the spec
688
+ if not resolved_path.path_in_repo:
689
+ # Path is the root directory
690
+ out = {
691
+ "name": path,
692
+ "size": 0,
693
+ "type": "directory",
694
+ }
695
+ if expand_info:
696
+ last_commit = self._api.list_repo_commits(
697
+ resolved_path.repo_id, repo_type=resolved_path.repo_type, revision=resolved_path.revision
698
+ )[-1]
699
+ out = {
700
+ **out,
701
+ "tree_id": None, # TODO: tree_id of the root directory?
702
+ "last_commit": LastCommitInfo(
703
+ oid=last_commit.commit_id, title=last_commit.title, date=last_commit.created_at
704
+ ),
705
+ }
706
+ else:
707
+ out = None
708
+ parent_path = self._parent(path)
709
+ if not expand_info and parent_path not in self.dircache:
710
+ # Fill the cache with a cheap call
711
+ self.ls(parent_path, expand_info=False)
712
+ if parent_path in self.dircache:
713
+ # Check if the path is in the cache
714
+ out1 = [o for o in self.dircache[parent_path] if o["name"] == path]
715
+ if not out1:
716
+ _raise_file_not_found(path, None)
717
+ out = out1[0]
718
+ if refresh or out is None or (expand_info and out and out["last_commit"] is None):
719
+ paths_info = self._api.get_paths_info(
720
+ resolved_path.repo_id,
721
+ resolved_path.path_in_repo,
722
+ expand=expand_info,
723
+ revision=resolved_path.revision,
724
+ repo_type=resolved_path.repo_type,
725
+ )
726
+ if not paths_info:
727
+ _raise_file_not_found(path, None)
728
+ path_info = paths_info[0]
729
+ root_path = HfFileSystemResolvedPath(
730
+ resolved_path.repo_type,
731
+ resolved_path.repo_id,
732
+ resolved_path.revision,
733
+ path_in_repo="",
734
+ _raw_revision=resolved_path._raw_revision,
735
+ ).unresolve()
736
+ if isinstance(path_info, RepoFile):
737
+ out = {
738
+ "name": root_path + "/" + path_info.path,
739
+ "size": path_info.size,
740
+ "type": "file",
741
+ "blob_id": path_info.blob_id,
742
+ "lfs": path_info.lfs,
743
+ "last_commit": path_info.last_commit,
744
+ "security": path_info.security,
745
+ }
746
+ else:
747
+ out = {
748
+ "name": root_path + "/" + path_info.path,
749
+ "size": 0,
750
+ "type": "directory",
751
+ "tree_id": path_info.tree_id,
752
+ "last_commit": path_info.last_commit,
753
+ }
754
+ if not expand_info:
755
+ out = {k: out[k] for k in ["name", "size", "type"]}
756
+ assert out is not None
757
+ return out
758
+
759
+ def exists(self, path, **kwargs):
760
+ """
761
+ Check if a file exists.
762
+
763
+ For more details, refer to [fsspec documentation](https://filesystem-spec.readthedocs.io/en/latest/api.html#fsspec.spec.AbstractFileSystem.exists).
764
+
765
+ <Tip warning={true}>
766
+
767
+ Note: When possible, use `HfApi.file_exists()` for better performance.
768
+
769
+ </Tip>
770
+
771
+ Args:
772
+ path (`str`):
773
+ Path to check.
774
+
775
+ Returns:
776
+ `bool`: True if file exists, False otherwise.
777
+ """
778
+ try:
779
+ if kwargs.get("refresh", False):
780
+ self.invalidate_cache(path)
781
+
782
+ self.info(path, **{**kwargs, "expand_info": False})
783
+ return True
784
+ except: # noqa: E722
785
+ return False
786
+
787
+ def isdir(self, path):
788
+ """
789
+ Check if a path is a directory.
790
+
791
+ For more details, refer to [fsspec documentation](https://filesystem-spec.readthedocs.io/en/latest/api.html#fsspec.spec.AbstractFileSystem.isdir).
792
+
793
+ Args:
794
+ path (`str`):
795
+ Path to check.
796
+
797
+ Returns:
798
+ `bool`: True if path is a directory, False otherwise.
799
+ """
800
+ try:
801
+ return self.info(path, expand_info=False)["type"] == "directory"
802
+ except OSError:
803
+ return False
804
+
805
+ def isfile(self, path):
806
+ """
807
+ Check if a path is a file.
808
+
809
+ For more details, refer to [fsspec documentation](https://filesystem-spec.readthedocs.io/en/latest/api.html#fsspec.spec.AbstractFileSystem.isfile).
810
+
811
+ Args:
812
+ path (`str`):
813
+ Path to check.
814
+
815
+ Returns:
816
+ `bool`: True if path is a file, False otherwise.
817
+ """
818
+ try:
819
+ return self.info(path, expand_info=False)["type"] == "file"
820
+ except: # noqa: E722
821
+ return False
822
+
823
+ def url(self, path: str) -> str:
824
+ """
825
+ Get the HTTP URL of the given path.
826
+
827
+ Args:
828
+ path (`str`):
829
+ Path to get URL for.
830
+
831
+ Returns:
832
+ `str`: HTTP URL to access the file or directory on the Hub.
833
+ """
834
+ resolved_path = self.resolve_path(path)
835
+ url = hf_hub_url(
836
+ resolved_path.repo_id,
837
+ resolved_path.path_in_repo,
838
+ repo_type=resolved_path.repo_type,
839
+ revision=resolved_path.revision,
840
+ endpoint=self.endpoint,
841
+ )
842
+ if self.isdir(path):
843
+ url = url.replace("/resolve/", "/tree/", 1)
844
+ return url
845
+
846
+ def get_file(self, rpath, lpath, callback=_DEFAULT_CALLBACK, outfile=None, **kwargs) -> None:
847
+ """
848
+ Copy single remote file to local.
849
+
850
+ <Tip warning={true}>
851
+
852
+ Note: When possible, use `HfApi.hf_hub_download()` for better performance.
853
+
854
+ </Tip>
855
+
856
+ Args:
857
+ rpath (`str`):
858
+ Remote path to download from.
859
+ lpath (`str`):
860
+ Local path to download to.
861
+ callback (`Callback`, *optional*):
862
+ Optional callback to track download progress. Defaults to no callback.
863
+ outfile (`IO`, *optional*):
864
+ Optional file-like object to write to. If provided, `lpath` is ignored.
865
+
866
+ """
867
+ revision = kwargs.get("revision")
868
+ unhandled_kwargs = set(kwargs.keys()) - {"revision"}
869
+ if not isinstance(callback, (NoOpCallback, TqdmCallback)) or len(unhandled_kwargs) > 0:
870
+ # for now, let's not handle custom callbacks
871
+ # and let's not handle custom kwargs
872
+ return super().get_file(rpath, lpath, callback=callback, outfile=outfile, **kwargs)
873
+
874
+ # Taken from https://github.com/fsspec/filesystem_spec/blob/47b445ae4c284a82dd15e0287b1ffc410e8fc470/fsspec/spec.py#L883
875
+ if isfilelike(lpath):
876
+ outfile = lpath
877
+ elif self.isdir(rpath):
878
+ os.makedirs(lpath, exist_ok=True)
879
+ return None
880
+
881
+ if isinstance(lpath, (str, Path)): # otherwise, let's assume it's a file-like object
882
+ os.makedirs(os.path.dirname(lpath), exist_ok=True)
883
+
884
+ # Open file if not already open
885
+ close_file = False
886
+ if outfile is None:
887
+ outfile = open(lpath, "wb")
888
+ close_file = True
889
+ initial_pos = outfile.tell()
890
+
891
+ # Custom implementation of `get_file` to use `http_get`.
892
+ resolve_remote_path = self.resolve_path(rpath, revision=revision)
893
+ expected_size = self.info(rpath, revision=revision)["size"]
894
+ callback.set_size(expected_size)
895
+ try:
896
+ http_get(
897
+ url=hf_hub_url(
898
+ repo_id=resolve_remote_path.repo_id,
899
+ revision=resolve_remote_path.revision,
900
+ filename=resolve_remote_path.path_in_repo,
901
+ repo_type=resolve_remote_path.repo_type,
902
+ endpoint=self.endpoint,
903
+ ),
904
+ temp_file=outfile,
905
+ displayed_filename=rpath,
906
+ expected_size=expected_size,
907
+ resume_size=0,
908
+ headers=self._api._build_hf_headers(),
909
+ _tqdm_bar=callback.tqdm if isinstance(callback, TqdmCallback) else None,
910
+ )
911
+ outfile.seek(initial_pos)
912
+ finally:
913
+ # Close file only if we opened it ourselves
914
+ if close_file:
915
+ outfile.close()
916
+
917
+ @property
918
+ def transaction(self):
919
+ """A context within which files are committed together upon exit
920
+
921
+ Requires the file class to implement `.commit()` and `.discard()`
922
+ for the normal and exception cases.
923
+ """
924
+ # Taken from https://github.com/fsspec/filesystem_spec/blob/3fbb6fee33b46cccb015607630843dea049d3243/fsspec/spec.py#L231
925
+ # See https://github.com/huggingface/huggingface_hub/issues/1733
926
+ raise NotImplementedError("Transactional commits are not supported.")
927
+
928
+ def start_transaction(self):
929
+ """Begin write transaction for deferring files, non-context version"""
930
+ # Taken from https://github.com/fsspec/filesystem_spec/blob/3fbb6fee33b46cccb015607630843dea049d3243/fsspec/spec.py#L241
931
+ # See https://github.com/huggingface/huggingface_hub/issues/1733
932
+ raise NotImplementedError("Transactional commits are not supported.")
933
+
934
+
935
+ class HfFileSystemFile(fsspec.spec.AbstractBufferedFile):
936
+ def __init__(self, fs: HfFileSystem, path: str, revision: Optional[str] = None, **kwargs):
937
+ try:
938
+ self.resolved_path = fs.resolve_path(path, revision=revision)
939
+ except FileNotFoundError as e:
940
+ if "w" in kwargs.get("mode", ""):
941
+ raise FileNotFoundError(
942
+ f"{e}.\nMake sure the repository and revision exist before writing data."
943
+ ) from e
944
+ raise
945
+ # avoid an unnecessary .info() call with expensive expand_info=True to instantiate .details
946
+ if kwargs.get("mode", "rb") == "rb":
947
+ self.details = fs.info(self.resolved_path.unresolve(), expand_info=False)
948
+ super().__init__(fs, self.resolved_path.unresolve(), **kwargs)
949
+ self.fs: HfFileSystem
950
+
951
+ def __del__(self):
952
+ if not hasattr(self, "resolved_path"):
953
+ # Means that the constructor failed. Nothing to do.
954
+ return
955
+ return super().__del__()
956
+
957
+ def _fetch_range(self, start: int, end: int) -> bytes:
958
+ headers = {
959
+ "range": f"bytes={start}-{end - 1}",
960
+ **self.fs._api._build_hf_headers(),
961
+ }
962
+ url = hf_hub_url(
963
+ repo_id=self.resolved_path.repo_id,
964
+ revision=self.resolved_path.revision,
965
+ filename=self.resolved_path.path_in_repo,
966
+ repo_type=self.resolved_path.repo_type,
967
+ endpoint=self.fs.endpoint,
968
+ )
969
+ r = http_backoff(
970
+ "GET",
971
+ url,
972
+ headers=headers,
973
+ retry_on_status_codes=(500, 502, 503, 504),
974
+ timeout=constants.HF_HUB_DOWNLOAD_TIMEOUT,
975
+ )
976
+ hf_raise_for_status(r)
977
+ return r.content
978
+
979
+ def _initiate_upload(self) -> None:
980
+ self.temp_file = tempfile.NamedTemporaryFile(prefix="hffs-", delete=False)
981
+
982
+ def _upload_chunk(self, final: bool = False) -> None:
983
+ self.buffer.seek(0)
984
+ block = self.buffer.read()
985
+ self.temp_file.write(block)
986
+ if final:
987
+ self.temp_file.close()
988
+ self.fs._api.upload_file(
989
+ path_or_fileobj=self.temp_file.name,
990
+ path_in_repo=self.resolved_path.path_in_repo,
991
+ repo_id=self.resolved_path.repo_id,
992
+ token=self.fs.token,
993
+ repo_type=self.resolved_path.repo_type,
994
+ revision=self.resolved_path.revision,
995
+ commit_message=self.kwargs.get("commit_message"),
996
+ commit_description=self.kwargs.get("commit_description"),
997
+ )
998
+ os.remove(self.temp_file.name)
999
+ self.fs.invalidate_cache(
1000
+ path=self.resolved_path.unresolve(),
1001
+ )
1002
+
1003
+ def read(self, length=-1):
1004
+ """Read remote file.
1005
+
1006
+ If `length` is not provided or is -1, the entire file is downloaded and read. On POSIX systems and if
1007
+ `hf_transfer` is not enabled, the file is loaded in memory directly. Otherwise, the file is downloaded to a
1008
+ temporary file and read from there.
1009
+ """
1010
+ if self.mode == "rb" and (length is None or length == -1) and self.loc == 0:
1011
+ with self.fs.open(self.path, "rb", block_size=0) as f: # block_size=0 enables fast streaming
1012
+ out = f.read()
1013
+ self.loc += len(out)
1014
+ return out
1015
+ return super().read(length)
1016
+
1017
+ def url(self) -> str:
1018
+ return self.fs.url(self.path)
1019
+
1020
+
1021
+ class HfFileSystemStreamFile(fsspec.spec.AbstractBufferedFile):
1022
+ def __init__(
1023
+ self,
1024
+ fs: HfFileSystem,
1025
+ path: str,
1026
+ mode: str = "rb",
1027
+ revision: Optional[str] = None,
1028
+ block_size: int = 0,
1029
+ cache_type: str = "none",
1030
+ **kwargs,
1031
+ ):
1032
+ if block_size != 0:
1033
+ raise ValueError(f"HfFileSystemStreamFile only supports block_size=0 but got {block_size}")
1034
+ if cache_type != "none":
1035
+ raise ValueError(f"HfFileSystemStreamFile only supports cache_type='none' but got {cache_type}")
1036
+ if "w" in mode:
1037
+ raise ValueError(f"HfFileSystemStreamFile only supports reading but got mode='{mode}'")
1038
+ try:
1039
+ self.resolved_path = fs.resolve_path(path, revision=revision)
1040
+ except FileNotFoundError as e:
1041
+ if "w" in kwargs.get("mode", ""):
1042
+ raise FileNotFoundError(
1043
+ f"{e}.\nMake sure the repository and revision exist before writing data."
1044
+ ) from e
1045
+ # avoid an unnecessary .info() call to instantiate .details
1046
+ self.details = {"name": self.resolved_path.unresolve(), "size": None}
1047
+ super().__init__(
1048
+ fs, self.resolved_path.unresolve(), mode=mode, block_size=block_size, cache_type=cache_type, **kwargs
1049
+ )
1050
+ self.response: Optional[Response] = None
1051
+ self.fs: HfFileSystem
1052
+
1053
+ def seek(self, loc: int, whence: int = 0):
1054
+ if loc == 0 and whence == 1:
1055
+ return
1056
+ if loc == self.loc and whence == 0:
1057
+ return
1058
+ raise ValueError("Cannot seek streaming HF file")
1059
+
1060
+ def read(self, length: int = -1):
1061
+ read_args = (length,) if length >= 0 else ()
1062
+ if self.response is None:
1063
+ url = hf_hub_url(
1064
+ repo_id=self.resolved_path.repo_id,
1065
+ revision=self.resolved_path.revision,
1066
+ filename=self.resolved_path.path_in_repo,
1067
+ repo_type=self.resolved_path.repo_type,
1068
+ endpoint=self.fs.endpoint,
1069
+ )
1070
+ self.response = http_backoff(
1071
+ "GET",
1072
+ url,
1073
+ headers=self.fs._api._build_hf_headers(),
1074
+ retry_on_status_codes=(500, 502, 503, 504),
1075
+ stream=True,
1076
+ timeout=constants.HF_HUB_DOWNLOAD_TIMEOUT,
1077
+ )
1078
+ hf_raise_for_status(self.response)
1079
+ try:
1080
+ out = self.response.raw.read(*read_args)
1081
+ except Exception:
1082
+ self.response.close()
1083
+
1084
+ # Retry by recreating the connection
1085
+ url = hf_hub_url(
1086
+ repo_id=self.resolved_path.repo_id,
1087
+ revision=self.resolved_path.revision,
1088
+ filename=self.resolved_path.path_in_repo,
1089
+ repo_type=self.resolved_path.repo_type,
1090
+ endpoint=self.fs.endpoint,
1091
+ )
1092
+ self.response = http_backoff(
1093
+ "GET",
1094
+ url,
1095
+ headers={"Range": "bytes=%d-" % self.loc, **self.fs._api._build_hf_headers()},
1096
+ retry_on_status_codes=(500, 502, 503, 504),
1097
+ stream=True,
1098
+ timeout=constants.HF_HUB_DOWNLOAD_TIMEOUT,
1099
+ )
1100
+ hf_raise_for_status(self.response)
1101
+ try:
1102
+ out = self.response.raw.read(*read_args)
1103
+ except Exception:
1104
+ self.response.close()
1105
+ raise
1106
+ self.loc += len(out)
1107
+ return out
1108
+
1109
+ def url(self) -> str:
1110
+ return self.fs.url(self.path)
1111
+
1112
+ def __del__(self):
1113
+ if not hasattr(self, "resolved_path"):
1114
+ # Means that the constructor failed. Nothing to do.
1115
+ return
1116
+ return super().__del__()
1117
+
1118
+ def __reduce__(self):
1119
+ return reopen, (self.fs, self.path, self.mode, self.blocksize, self.cache.name)
1120
+
1121
+
1122
+ def safe_revision(revision: str) -> str:
1123
+ return revision if SPECIAL_REFS_REVISION_REGEX.match(revision) else safe_quote(revision)
1124
+
1125
+
1126
+ def safe_quote(s: str) -> str:
1127
+ return quote(s, safe="")
1128
+
1129
+
1130
+ def _raise_file_not_found(path: str, err: Optional[Exception]) -> NoReturn:
1131
+ msg = path
1132
+ if isinstance(err, RepositoryNotFoundError):
1133
+ msg = f"{path} (repository not found)"
1134
+ elif isinstance(err, RevisionNotFoundError):
1135
+ msg = f"{path} (revision not found)"
1136
+ elif isinstance(err, HFValidationError):
1137
+ msg = f"{path} (invalid repository id)"
1138
+ raise FileNotFoundError(msg) from err
1139
+
1140
+
1141
+ def reopen(fs: HfFileSystem, path: str, mode: str, block_size: int, cache_type: str):
1142
+ return fs.open(path, mode=mode, block_size=block_size, cache_type=cache_type)
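To make the pieces above concrete, here is a minimal usage sketch for `HfFileSystem` through the standard fsspec interface, exercising `ls`, `open` (with `block_size=0`, which selects the streaming file class), `modified`, and `url`. The repo id and file path are hypothetical placeholders, and a valid token is assumed to be configured:

```python
# Hedged sketch: exercises the HfFileSystem API defined above.
# "datasets/username/my-dataset" and "data/train.csv" are hypothetical.
from huggingface_hub import HfFileSystem

fs = HfFileSystem()  # picks up the stored token by default

# Cheap listing (no last_commit expansion, see ls/info above)
files = fs.ls("datasets/username/my-dataset", detail=False)

# block_size=0 selects HfFileSystemStreamFile for fast sequential reads
with fs.open("datasets/username/my-dataset/data/train.csv", "rb", block_size=0) as f:
    header = f.read(1024)

print(fs.modified("datasets/username/my-dataset/data/train.csv"))
print(fs.url("datasets/username/my-dataset/data/train.csv"))
```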
.venv/lib/python3.13/site-packages/huggingface_hub/inference_api.py ADDED
@@ -0,0 +1,217 @@
1
+ import io
2
+ from typing import Any, Dict, List, Optional, Union
3
+
4
+ from . import constants
5
+ from .hf_api import HfApi
6
+ from .utils import build_hf_headers, get_session, is_pillow_available, logging, validate_hf_hub_args
7
+ from .utils._deprecation import _deprecate_method
8
+
9
+
10
+ logger = logging.get_logger(__name__)
11
+
12
+
13
+ ALL_TASKS = [
14
+ # NLP
15
+ "text-classification",
16
+ "token-classification",
17
+ "table-question-answering",
18
+ "question-answering",
19
+ "zero-shot-classification",
20
+ "translation",
21
+ "summarization",
22
+ "conversational",
23
+ "feature-extraction",
24
+ "text-generation",
25
+ "text2text-generation",
26
+ "fill-mask",
27
+ "sentence-similarity",
28
+ # Audio
29
+ "text-to-speech",
30
+ "automatic-speech-recognition",
31
+ "audio-to-audio",
32
+ "audio-classification",
33
+ "voice-activity-detection",
34
+ # Computer vision
35
+ "image-classification",
36
+ "object-detection",
37
+ "image-segmentation",
38
+ "text-to-image",
39
+ "image-to-image",
40
+ # Others
41
+ "tabular-classification",
42
+ "tabular-regression",
43
+ ]
44
+
45
+
46
+ class InferenceApi:
47
+ """Client to configure requests and make calls to the HuggingFace Inference API.
48
+
49
+ Example:
50
+
51
+ ```python
52
+ >>> from huggingface_hub.inference_api import InferenceApi
53
+
54
+ >>> # Mask-fill example
55
+ >>> inference = InferenceApi("bert-base-uncased")
56
+ >>> inference(inputs="The goal of life is [MASK].")
57
+ [{'sequence': 'the goal of life is life.', 'score': 0.10933292657136917, 'token': 2166, 'token_str': 'life'}]
58
+
59
+ >>> # Question Answering example
60
+ >>> inference = InferenceApi("deepset/roberta-base-squad2")
61
+ >>> inputs = {
62
+ ... "question": "What's my name?",
63
+ ... "context": "My name is Clara and I live in Berkeley.",
64
+ ... }
65
+ >>> inference(inputs)
66
+ {'score': 0.9326569437980652, 'start': 11, 'end': 16, 'answer': 'Clara'}
67
+
68
+ >>> # Zero-shot example
69
+ >>> inference = InferenceApi("typeform/distilbert-base-uncased-mnli")
70
+ >>> inputs = "Hi, I recently bought a device from your company but it is not working as advertised and I would like to get reimbursed!"
71
+ >>> params = {"candidate_labels": ["refund", "legal", "faq"]}
72
+ >>> inference(inputs, params)
73
+ {'sequence': 'Hi, I recently bought a device from your company but it is not working as advertised and I would like to get reimbursed!', 'labels': ['refund', 'faq', 'legal'], 'scores': [0.9378499388694763, 0.04914155602455139, 0.013008488342165947]}
74
+
75
+ >>> # Overriding configured task
76
+ >>> inference = InferenceApi("bert-base-uncased", task="feature-extraction")
77
+
78
+ >>> # Text-to-image
79
+ >>> inference = InferenceApi("stabilityai/stable-diffusion-2-1")
80
+ >>> inference("cat")
81
+ <PIL.PngImagePlugin.PngImageFile image (...)>
82
+
83
+ >>> # Return as raw response to parse the output yourself
84
+ >>> inference = InferenceApi("mio/amadeus")
85
+ >>> response = inference("hello world", raw_response=True)
86
+ >>> response.headers
87
+ {"Content-Type": "audio/flac", ...}
88
+ >>> response.content # raw bytes from server
89
+ b'(...)'
90
+ ```
91
+ """
92
+
93
+ @validate_hf_hub_args
94
+ @_deprecate_method(
95
+ version="1.0",
96
+ message=(
97
+ "`InferenceApi` client is deprecated in favor of the more feature-complete `InferenceClient`. Check out"
98
+ " this guide to learn how to convert your script to use it:"
99
+ " https://huggingface.co/docs/huggingface_hub/guides/inference#legacy-inferenceapi-client."
100
+ ),
101
+ )
102
+ def __init__(
103
+ self,
104
+ repo_id: str,
105
+ task: Optional[str] = None,
106
+ token: Optional[str] = None,
107
+ gpu: bool = False,
108
+ ):
109
+ """Inits headers and API call information.
110
+
111
+ Args:
112
+ repo_id (``str``):
113
+ Id of repository (e.g. `user/bert-base-uncased`).
114
+ task (``str``, `optional`, defaults to ``None``):
115
+ The task to force instead of the one specified in the
116
+ repository.
117
+ token (`str`, `optional`):
118
+ The API token to use as HTTP bearer authorization. This is not
119
+ the authentication token. You can find the token in
120
+ https://huggingface.co/settings/token. Alternatively, you can
121
+ find both your organizations and personal API tokens using
122
+ `HfApi().whoami(token)`.
123
+ gpu (`bool`, `optional`, defaults to `False`):
124
+ Whether to use GPU instead of CPU for inference (requires at least
125
+ the Startup plan).
126
+ """
127
+ self.options = {"wait_for_model": True, "use_gpu": gpu}
128
+ self.headers = build_hf_headers(token=token)
129
+
130
+ # Configure task
131
+ model_info = HfApi(token=token).model_info(repo_id=repo_id)
132
+ if not model_info.pipeline_tag and not task:
133
+ raise ValueError(
134
+ "Task not specified in the repository. Please add it to the model card"
135
+ " using pipeline_tag"
136
+ " (https://huggingface.co/docs#how-is-a-models-type-of-inference-api-and-widget-determined)"
137
+ )
138
+
139
+ if task and task != model_info.pipeline_tag:
140
+ if task not in ALL_TASKS:
141
+ raise ValueError(f"Invalid task {task}. Make sure it's valid.")
142
+
143
+ logger.warning(
144
+ "You're using a different task than the one specified in the"
145
+ " repository. Be sure to know what you're doing :)"
146
+ )
147
+ self.task = task
148
+ else:
149
+ assert model_info.pipeline_tag is not None, "Pipeline tag cannot be None"
150
+ self.task = model_info.pipeline_tag
151
+
152
+ self.api_url = f"{constants.INFERENCE_ENDPOINT}/pipeline/{self.task}/{repo_id}"
153
+
154
+ def __repr__(self):
155
+ # Do not add headers to repr to avoid leaking token.
156
+ return f"InferenceAPI(api_url='{self.api_url}', task='{self.task}', options={self.options})"
157
+
158
+ def __call__(
159
+ self,
160
+ inputs: Optional[Union[str, Dict, List[str], List[List[str]]]] = None,
161
+ params: Optional[Dict] = None,
162
+ data: Optional[bytes] = None,
163
+ raw_response: bool = False,
164
+ ) -> Any:
165
+ """Make a call to the Inference API.
166
+
167
+ Args:
168
+ inputs (`str` or `Dict` or `List[str]` or `List[List[str]]`, *optional*):
169
+ Inputs for the prediction.
170
+ params (`Dict`, *optional*):
171
+ Additional parameters for the models. Will be sent as `parameters` in the
172
+ payload.
173
+ data (`bytes`, *optional*):
174
+ Bytes content of the request. In this case, leave `inputs` and `params` empty.
175
+ raw_response (`bool`, defaults to `False`):
176
+ If `True`, the raw `Response` object is returned. You can parse its content
177
+ as preferred. By default, the content is parsed into a more practical format
178
+ (json dictionary or PIL Image for example).
179
+ """
180
+ # Build payload
181
+ payload: Dict[str, Any] = {
182
+ "options": self.options,
183
+ }
184
+ if inputs:
185
+ payload["inputs"] = inputs
186
+ if params:
187
+ payload["parameters"] = params
188
+
189
+ # Make API call
190
+ response = get_session().post(self.api_url, headers=self.headers, json=payload, data=data)
191
+
192
+ # Let the user handle the response
193
+ if raw_response:
194
+ return response
195
+
196
+ # By default, parse the response for the user.
197
+ content_type = response.headers.get("Content-Type") or ""
198
+ if content_type.startswith("image"):
199
+ if not is_pillow_available():
200
+ raise ImportError(
201
+ f"Task '{self.task}' returned as image but Pillow is not installed."
202
+ " Please install it (`pip install Pillow`) or pass"
203
+ " `raw_response=True` to get the raw `Response` object and parse"
204
+ " the image by yourself."
205
+ )
206
+
207
+ from PIL import Image
208
+
209
+ return Image.open(io.BytesIO(response.content))
210
+ elif content_type == "application/json":
211
+ return response.json()
212
+ else:
213
+ raise NotImplementedError(
214
+ f"{content_type} output type is not implemented yet. You can pass"
215
+ " `raw_response=True` to get the raw `Response` object and parse the"
216
+ " output by yourself."
217
+ )
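Because `__call__` only knows how to parse image and JSON responses, any other content type (audio, for example) has to go through `raw_response=True`, as the `NotImplementedError` above suggests. A hedged sketch; the model id is a hypothetical placeholder, and note that `InferenceApi` itself is deprecated in favor of `InferenceClient`:

```python
# Sketch only: manual parsing of a non-JSON, non-image response.
from huggingface_hub.inference_api import InferenceApi

inference = InferenceApi("username/my-tts-model")  # hypothetical repo id
response = inference("hello world", raw_response=True)  # skip built-in parsing
if response.headers.get("Content-Type", "").startswith("audio"):
    with open("out.flac", "wb") as f:
        f.write(response.content)  # raw bytes from the server
```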
.venv/lib/python3.13/site-packages/huggingface_hub/py.typed ADDED
File without changes
.venv/lib/python3.13/site-packages/huggingface_hub/repocard.py ADDED
@@ -0,0 +1,830 @@
1
+ import os
2
+ import re
3
+ from pathlib import Path
4
+ from typing import Any, Dict, Literal, Optional, Type, Union
5
+
6
+ import requests
7
+ import yaml
8
+
9
+ from huggingface_hub.file_download import hf_hub_download
10
+ from huggingface_hub.hf_api import upload_file
11
+ from huggingface_hub.repocard_data import (
12
+ CardData,
13
+ DatasetCardData,
14
+ EvalResult,
15
+ ModelCardData,
16
+ SpaceCardData,
17
+ eval_results_to_model_index,
18
+ model_index_to_eval_results,
19
+ )
20
+ from huggingface_hub.utils import get_session, is_jinja_available, yaml_dump
21
+
22
+ from . import constants
23
+ from .errors import EntryNotFoundError
24
+ from .utils import SoftTemporaryDirectory, logging, validate_hf_hub_args
25
+
26
+
27
+ logger = logging.get_logger(__name__)
28
+
29
+
30
+ TEMPLATE_MODELCARD_PATH = Path(__file__).parent / "templates" / "modelcard_template.md"
31
+ TEMPLATE_DATASETCARD_PATH = Path(__file__).parent / "templates" / "datasetcard_template.md"
32
+
33
+ # exact same regex as in the Hub server. Please keep in sync.
34
+ # See https://github.com/huggingface/moon-landing/blob/main/server/lib/ViewMarkdown.ts#L18
35
+ REGEX_YAML_BLOCK = re.compile(r"^(\s*---[\r\n]+)([\S\s]*?)([\r\n]+---(\r\n|\n|$))")
36
+
37
+
38
+ class RepoCard:
39
+ card_data_class = CardData
40
+ default_template_path = TEMPLATE_MODELCARD_PATH
41
+ repo_type = "model"
42
+
43
+ def __init__(self, content: str, ignore_metadata_errors: bool = False):
44
+ """Initialize a RepoCard from string content. The content should be a
45
+ Markdown file with a YAML block at the beginning and a Markdown body.
46
+
47
+ Args:
48
+ content (`str`): The content of the Markdown file.
49
+
50
+ Example:
51
+ ```python
52
+ >>> from huggingface_hub.repocard import RepoCard
53
+ >>> text = '''
54
+ ... ---
55
+ ... language: en
56
+ ... license: mit
57
+ ... ---
58
+ ...
59
+ ... # My repo
60
+ ... '''
61
+ >>> card = RepoCard(text)
62
+ >>> card.data.to_dict()
63
+ {'language': 'en', 'license': 'mit'}
64
+ >>> card.text
65
+ '\\n# My repo\\n'
66
+
67
+ ```
68
+ <Tip>
69
+ Raises the following error:
70
+
71
+ - [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
72
+ when the content of the repo card metadata is not a dictionary.
73
+
74
+ </Tip>
75
+ """
76
+
77
+ # Set the content of the RepoCard, as well as underlying .data and .text attributes.
78
+ # See the `content` property setter for more details.
79
+ self.ignore_metadata_errors = ignore_metadata_errors
80
+ self.content = content
81
+
82
+ @property
83
+ def content(self):
84
+ """The content of the RepoCard, including the YAML block and the Markdown body."""
85
+ line_break = _detect_line_ending(self._content) or "\n"
86
+ return f"---{line_break}{self.data.to_yaml(line_break=line_break, original_order=self._original_order)}{line_break}---{line_break}{self.text}"
87
+
88
+ @content.setter
89
+ def content(self, content: str):
90
+ """Set the content of the RepoCard."""
91
+ self._content = content
92
+
93
+ match = REGEX_YAML_BLOCK.search(content)
94
+ if match:
95
+ # Metadata found in the YAML block
96
+ yaml_block = match.group(2)
97
+ self.text = content[match.end() :]
98
+ data_dict = yaml.safe_load(yaml_block)
99
+
100
+ if data_dict is None:
101
+ data_dict = {}
102
+
103
+ # The YAML block's data should be a dictionary
104
+ if not isinstance(data_dict, dict):
105
+ raise ValueError("repo card metadata block should be a dict")
106
+ else:
107
+ # Model card without metadata... create empty metadata
108
+ logger.warning("Repo card metadata block was not found. Setting CardData to empty.")
109
+ data_dict = {}
110
+ self.text = content
111
+
112
+ self.data = self.card_data_class(**data_dict, ignore_metadata_errors=self.ignore_metadata_errors)
113
+ self._original_order = list(data_dict.keys())
114
+
115
+ def __str__(self):
116
+ return self.content
117
+
118
+ def save(self, filepath: Union[Path, str]):
119
+ r"""Save a RepoCard to a file.
120
+
121
+ Args:
122
+ filepath (`Union[Path, str]`): Filepath to the markdown file to save.
123
+
124
+ Example:
125
+ ```python
126
+ >>> from huggingface_hub.repocard import RepoCard
127
+ >>> card = RepoCard("---\nlanguage: en\n---\n# This is a test repo card")
128
+ >>> card.save("/tmp/test.md")
129
+
130
+ ```
131
+ """
132
+ filepath = Path(filepath)
133
+ filepath.parent.mkdir(parents=True, exist_ok=True)
134
+ # Preserve newlines as in the existing file.
135
+ with open(filepath, mode="w", newline="", encoding="utf-8") as f:
136
+ f.write(str(self))
137
+
138
+ @classmethod
139
+ def load(
140
+ cls,
141
+ repo_id_or_path: Union[str, Path],
142
+ repo_type: Optional[str] = None,
143
+ token: Optional[str] = None,
144
+ ignore_metadata_errors: bool = False,
145
+ ):
146
+ """Initialize a RepoCard from a Hugging Face Hub repo's README.md or a local filepath.
147
+
148
+ Args:
149
+ repo_id_or_path (`Union[str, Path]`):
150
+ The repo ID associated with a Hugging Face Hub repo or a local filepath.
151
+ repo_type (`str`, *optional*):
152
+ The type of Hugging Face repo to push to. Defaults to None, which will use "model". Other options
153
+ are "dataset" and "space". Not used when loading from a local filepath. If this is called from a child
154
+ class, the default value will be the child class's `repo_type`.
155
+ token (`str`, *optional*):
156
+ Authentication token, obtained with `huggingface_hub.HfApi.login` method. Will default to the stored token.
157
+ ignore_metadata_errors (`str`):
158
+ If True, errors while parsing the metadata section will be ignored. Some information might be lost during
159
+ the process. Use it at your own risk.
160
+
161
+ Returns:
162
+ [`huggingface_hub.repocard.RepoCard`]: The RepoCard (or subclass) initialized from the repo's
163
+ README.md file or filepath.
164
+
165
+ Example:
166
+ ```python
167
+ >>> from huggingface_hub.repocard import RepoCard
168
+ >>> card = RepoCard.load("nateraw/food")
169
+ >>> assert card.data.tags == ["generated_from_trainer", "image-classification", "pytorch"]
170
+
171
+ ```
172
+ """
173
+
174
+ if Path(repo_id_or_path).is_file():
175
+ card_path = Path(repo_id_or_path)
176
+ elif isinstance(repo_id_or_path, str):
177
+ card_path = Path(
178
+ hf_hub_download(
179
+ repo_id_or_path,
180
+ constants.REPOCARD_NAME,
181
+ repo_type=repo_type or cls.repo_type,
182
+ token=token,
183
+ )
184
+ )
185
+ else:
186
+ raise ValueError(f"Cannot load RepoCard: path not found on disk ({repo_id_or_path}).")
187
+
188
+ # Preserve newlines in the existing file.
189
+ with card_path.open(mode="r", newline="", encoding="utf-8") as f:
190
+ return cls(f.read(), ignore_metadata_errors=ignore_metadata_errors)
191
+
192
+ def validate(self, repo_type: Optional[str] = None):
193
+ """Validates card against Hugging Face Hub's card validation logic.
194
+ Using this function requires access to the internet, so it is only called
195
+ internally by [`huggingface_hub.repocard.RepoCard.push_to_hub`].
196
+
197
+ Args:
198
+ repo_type (`str`, *optional*, defaults to "model"):
199
+ The type of Hugging Face repo to push to. Options are "model", "dataset", and "space".
200
+ If this function is called from a child class, the default will be the child class's `repo_type`.
201
+
202
+ <Tip>
203
+ Raises the following errors:
204
+
205
+ - [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
206
+ if the card fails validation checks.
207
+ - [`HTTPError`](https://requests.readthedocs.io/en/latest/api/#requests.HTTPError)
208
+ if the request to the Hub API fails for any other reason.
209
+
210
+ </Tip>
211
+ """
212
+
213
+ # Use the provided repo_type if given, otherwise fall back to the card's repo_type.
214
+ repo_type = repo_type or self.repo_type
215
+
216
+ body = {
217
+ "repoType": repo_type,
218
+ "content": str(self),
219
+ }
220
+ headers = {"Accept": "text/plain"}
221
+
222
+ try:
223
+ r = get_session().post("https://huggingface.co/api/validate-yaml", body, headers=headers)
224
+ r.raise_for_status()
225
+ except requests.exceptions.HTTPError as exc:
226
+ if r.status_code == 400:
227
+ raise ValueError(r.text)
228
+ else:
229
+ raise exc
230
+
231
+ def push_to_hub(
232
+ self,
233
+ repo_id: str,
234
+ token: Optional[str] = None,
235
+ repo_type: Optional[str] = None,
236
+ commit_message: Optional[str] = None,
237
+ commit_description: Optional[str] = None,
238
+ revision: Optional[str] = None,
239
+ create_pr: Optional[bool] = None,
240
+ parent_commit: Optional[str] = None,
241
+ ):
242
+ """Push a RepoCard to a Hugging Face Hub repo.
243
+
244
+ Args:
245
+ repo_id (`str`):
246
+ The repo ID of the Hugging Face Hub repo to push to. Example: "nateraw/food".
247
+ token (`str`, *optional*):
248
+ Authentication token, obtained with `huggingface_hub.HfApi.login` method. Will default to
249
+ the stored token.
250
+ repo_type (`str`, *optional*, defaults to "model"):
251
+ The type of Hugging Face repo to push to. Options are "model", "dataset", and "space". If this
252
+ function is called by a child class, it will default to the child class's `repo_type`.
253
+ commit_message (`str`, *optional*):
254
+ The summary / title / first line of the generated commit.
255
+ commit_description (`str`, *optional*):
256
+ The description of the generated commit.
257
+ revision (`str`, *optional*):
258
+ The git revision to commit from. Defaults to the head of the `"main"` branch.
259
+ create_pr (`bool`, *optional*):
260
+ Whether or not to create a Pull Request with this commit. Defaults to `False`.
261
+ parent_commit (`str`, *optional*):
262
+ The OID / SHA of the parent commit, as a hexadecimal string. Shorthands (7 first characters) are also supported.
263
+ If specified and `create_pr` is `False`, the commit will fail if `revision` does not point to `parent_commit`.
264
+ If specified and `create_pr` is `True`, the pull request will be created from `parent_commit`.
265
+ Specifying `parent_commit` ensures the repo has not changed before committing the changes, and can be
266
+ especially useful if the repo is updated / committed to concurrently.
267
+ Returns:
268
+ `str`: URL of the commit which updated the card metadata.
269
+ """
270
+
271
+ # Use the provided repo_type if given, otherwise fall back to the card's repo_type.
272
+ repo_type = repo_type or self.repo_type
273
+
274
+ # Validate card before pushing to hub
275
+ self.validate(repo_type=repo_type)
276
+
277
+ with SoftTemporaryDirectory() as tmpdir:
278
+ tmp_path = Path(tmpdir) / constants.REPOCARD_NAME
279
+ tmp_path.write_text(str(self))
280
+ url = upload_file(
281
+ path_or_fileobj=str(tmp_path),
282
+ path_in_repo=constants.REPOCARD_NAME,
283
+ repo_id=repo_id,
284
+ token=token,
285
+ repo_type=repo_type,
286
+ commit_message=commit_message,
287
+ commit_description=commit_description,
288
+ create_pr=create_pr,
289
+ revision=revision,
290
+ parent_commit=parent_commit,
291
+ )
292
+ return url
293
+
294
+ @classmethod
295
+ def from_template(
296
+ cls,
297
+ card_data: CardData,
298
+ template_path: Optional[str] = None,
299
+ template_str: Optional[str] = None,
300
+ **template_kwargs,
301
+ ):
302
+ """Initialize a RepoCard from a template. By default, it uses the default template.
303
+
304
+ Templates are Jinja2 templates that can be customized by passing keyword arguments.
305
+
306
+ Args:
307
+ card_data (`huggingface_hub.CardData`):
308
+ A huggingface_hub.CardData instance containing the metadata you want to include in the YAML
309
+ header of the repo card on the Hugging Face Hub.
310
+ template_path (`str`, *optional*):
311
+ A path to a markdown file with optional Jinja template variables that can be filled
312
+ in with `template_kwargs`. Defaults to the default template.
313
+
314
+ Returns:
315
+ [`huggingface_hub.repocard.RepoCard`]: A RepoCard instance with the specified card data and content from the
316
+ template.
317
+ """
318
+ if is_jinja_available():
319
+ import jinja2
320
+ else:
321
+ raise ImportError(
322
+ "Using RepoCard.from_template requires Jinja2 to be installed. Please"
323
+ " install it with `pip install Jinja2`."
324
+ )
325
+
326
+ kwargs = card_data.to_dict().copy()
327
+ kwargs.update(template_kwargs) # Template_kwargs have priority
328
+
329
+ if template_path is not None:
330
+ template_str = Path(template_path).read_text()
331
+ if template_str is None:
332
+ template_str = Path(cls.default_template_path).read_text()
333
+ template = jinja2.Template(template_str)
334
+ content = template.render(card_data=card_data.to_yaml(), **kwargs)
335
+ return cls(content)
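A quick round-trip sketch of the `RepoCard` machinery defined so far: the `content` setter parses the YAML block with `REGEX_YAML_BLOCK`, `data` holds the parsed metadata, and `content`/`save` re-serialize it while preserving line endings. The local path is hypothetical, and attribute-style access on `CardData` is assumed, matching the docstring examples above:

```python
# Round-trip sketch for the RepoCard class above.
from huggingface_hub.repocard import RepoCard

card = RepoCard("---\nlanguage: en\n---\n# My repo\n")
card.data.license = "mit"               # mutate the parsed metadata
assert "license: mit" in card.content   # content is re-rendered on access
card.save("/tmp/README.md")             # hypothetical local path
```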
336
+
337
+
338
+ class ModelCard(RepoCard):
339
+ card_data_class = ModelCardData
340
+ default_template_path = TEMPLATE_MODELCARD_PATH
341
+ repo_type = "model"
342
+
343
+ @classmethod
344
+ def from_template( # type: ignore # violates Liskov property but easier to use
345
+ cls,
346
+ card_data: ModelCardData,
347
+ template_path: Optional[str] = None,
348
+ template_str: Optional[str] = None,
349
+ **template_kwargs,
350
+ ):
351
+ """Initialize a ModelCard from a template. By default, it uses the default template, which can be found here:
352
+ https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/modelcard_template.md
353
+
354
+ Templates are Jinja2 templates that can be customized by passing keyword arguments.
355
+
356
+ Args:
357
+ card_data (`huggingface_hub.ModelCardData`):
358
+ A huggingface_hub.ModelCardData instance containing the metadata you want to include in the YAML
359
+ header of the model card on the Hugging Face Hub.
360
+ template_path (`str`, *optional*):
361
+ A path to a markdown file with optional Jinja template variables that can be filled
362
+ in with `template_kwargs`. Defaults to the default template.
363
+
364
+ Returns:
365
+ [`huggingface_hub.ModelCard`]: A ModelCard instance with the specified card data and content from the
366
+ template.
367
+
368
+ Example:
369
+ ```python
370
+ >>> from huggingface_hub import ModelCard, ModelCardData, EvalResult
371
+
372
+ >>> # Using the Default Template
373
+ >>> card_data = ModelCardData(
374
+ ... language='en',
375
+ ... license='mit',
376
+ ... library_name='timm',
377
+ ... tags=['image-classification', 'resnet'],
378
+ ... datasets=['beans'],
379
+ ... metrics=['accuracy'],
380
+ ... )
381
+ >>> card = ModelCard.from_template(
382
+ ... card_data,
383
+ ... model_description='This model does x + y...'
384
+ ... )
385
+
386
+ >>> # Including Evaluation Results
387
+ >>> card_data = ModelCardData(
388
+ ... language='en',
389
+ ... tags=['image-classification', 'resnet'],
390
+ ... eval_results=[
391
+ ... EvalResult(
392
+ ... task_type='image-classification',
393
+ ... dataset_type='beans',
394
+ ... dataset_name='Beans',
395
+ ... metric_type='accuracy',
396
+ ... metric_value=0.9,
397
+ ... ),
398
+ ... ],
399
+ ... model_name='my-cool-model',
400
+ ... )
401
+ >>> card = ModelCard.from_template(card_data)
402
+
403
+ >>> # Using a Custom Template
404
+ >>> card_data = ModelCardData(
405
+ ... language='en',
406
+ ... tags=['image-classification', 'resnet']
407
+ ... )
408
+ >>> card = ModelCard.from_template(
409
+ ... card_data=card_data,
410
+ ... template_path='./src/huggingface_hub/templates/modelcard_template.md',
411
+ ... custom_template_var='custom value', # will be replaced in template if it exists
412
+ ... )
413
+
414
+ ```
415
+ """
416
+ return super().from_template(card_data, template_path, template_str, **template_kwargs)
417
+
418
+
419
+ class DatasetCard(RepoCard):
420
+ card_data_class = DatasetCardData
421
+ default_template_path = TEMPLATE_DATASETCARD_PATH
422
+ repo_type = "dataset"
423
+
424
+ @classmethod
425
+ def from_template( # type: ignore # violates Liskov property but easier to use
426
+ cls,
427
+ card_data: DatasetCardData,
428
+ template_path: Optional[str] = None,
429
+ template_str: Optional[str] = None,
430
+ **template_kwargs,
431
+ ):
432
+ """Initialize a DatasetCard from a template. By default, it uses the default template, which can be found here:
433
+ https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/datasetcard_template.md
434
+
435
+ Templates are Jinja2 templates that can be customized by passing keyword arguments.
436
+
437
+ Args:
438
+ card_data (`huggingface_hub.DatasetCardData`):
439
+ A huggingface_hub.DatasetCardData instance containing the metadata you want to include in the YAML
440
+ header of the dataset card on the Hugging Face Hub.
441
+ template_path (`str`, *optional*):
442
+ A path to a markdown file with optional Jinja template variables that can be filled
443
+ in with `template_kwargs`. Defaults to the default template.
444
+
445
+ Returns:
446
+ [`huggingface_hub.DatasetCard`]: A DatasetCard instance with the specified card data and content from the
447
+ template.
448
+
449
+ Example:
450
+ ```python
451
+ >>> from huggingface_hub import DatasetCard, DatasetCardData
452
+
453
+ >>> # Using the Default Template
454
+ >>> card_data = DatasetCardData(
455
+ ... language='en',
456
+ ... license='mit',
457
+ ... annotations_creators='crowdsourced',
458
+ ... task_categories=['text-classification'],
459
+ ... task_ids=['sentiment-classification', 'text-scoring'],
460
+ ... multilinguality='monolingual',
461
+ ... pretty_name='My Text Classification Dataset',
462
+ ... )
463
+ >>> card = DatasetCard.from_template(
464
+ ... card_data,
465
+ ... pretty_name=card_data.pretty_name,
466
+ ... )
467
+
468
+ >>> # Using a Custom Template
469
+ >>> card_data = DatasetCardData(
470
+ ... language='en',
471
+ ... license='mit',
472
+ ... )
473
+ >>> card = DatasetCard.from_template(
474
+ ... card_data=card_data,
475
+ ... template_path='./src/huggingface_hub/templates/datasetcard_template.md',
476
+ ... custom_template_var='custom value', # will be replaced in template if it exists
477
+ ... )
478
+
479
+ ```
480
+ """
481
+ return super().from_template(card_data, template_path, template_str, **template_kwargs)
482
+
483
+
484
+ class SpaceCard(RepoCard):
485
+ card_data_class = SpaceCardData
486
+ default_template_path = TEMPLATE_MODELCARD_PATH
487
+ repo_type = "space"
488
+
489
+
490
+ def _detect_line_ending(content: str) -> Literal["\r", "\n", "\r\n", None]: # noqa: F722
491
+ """Detect the line ending of a string. Used by RepoCard to avoid making huge diff on newlines.
492
+
493
+ Uses the same implementation as the Hub server; keep them in sync.
494
+
495
+ Returns:
496
+ str: The detected line ending of the string.
497
+ """
498
+ cr = content.count("\r")
499
+ lf = content.count("\n")
500
+ crlf = content.count("\r\n")
501
+ if cr + lf == 0:
502
+ return None
503
+ if crlf == cr and crlf == lf:
504
+ return "\r\n"
505
+ if cr > lf:
506
+ return "\r"
507
+ else:
508
+ return "\n"
509
+
510
+
511
+ def metadata_load(local_path: Union[str, Path]) -> Optional[Dict]:
512
+ content = Path(local_path).read_text()
513
+ match = REGEX_YAML_BLOCK.search(content)
514
+ if match:
515
+ yaml_block = match.group(2)
516
+ data = yaml.safe_load(yaml_block)
517
+ if data is None or isinstance(data, dict):
518
+ return data
519
+ raise ValueError("repo card metadata block should be a dict")
520
+ else:
521
+ return None
522
+
523
+
524
+ def metadata_save(local_path: Union[str, Path], data: Dict) -> None:
525
+ """
526
+ Save the metadata dict in the upper YAML block, trying to preserve newlines as
527
+ in the existing file. See the docs for open() with the newline="" parameter:
528
+ https://docs.python.org/3/library/functions.html?highlight=open#open. Does
529
+ not work with "^M" linebreaks, which are replaced by \n.
530
+ """
531
+ line_break = "\n"
532
+ content = ""
533
+ # try to detect existing newline character
534
+ if os.path.exists(local_path):
535
+ with open(local_path, "r", newline="", encoding="utf8") as readme:
536
+ content = readme.read()
537
+ if isinstance(readme.newlines, tuple):
538
+ line_break = readme.newlines[0]
539
+ elif isinstance(readme.newlines, str):
540
+ line_break = readme.newlines
541
+
542
+ # creates a new file if it does not exist yet
543
+ with open(local_path, "w", newline="", encoding="utf8") as readme:
544
+ data_yaml = yaml_dump(data, sort_keys=False, line_break=line_break)
545
+ # sort_keys: keep dict order
546
+ match = REGEX_YAML_BLOCK.search(content)
547
+ if match:
548
+ output = content[: match.start()] + f"---{line_break}{data_yaml}---{line_break}" + content[match.end() :]
549
+ else:
550
+ output = f"---{line_break}{data_yaml}---{line_break}{content}"
551
+
552
+ readme.write(output)
553
+ readme.close()
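`metadata_load` and `metadata_save` together give a dict-level way to edit the YAML block without instantiating a `RepoCard`. A minimal sketch, assuming a local `README.md` that may or may not already have a metadata block:

```python
# Sketch: dict-level round trip with the two helpers above.
from huggingface_hub.repocard import metadata_load, metadata_save

data = metadata_load("README.md") or {}  # returns None if no YAML block
data["license"] = "mit"
metadata_save("README.md", data)  # rewrites (or prepends) the YAML block
```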
554
+
555
+
556
+ def metadata_eval_result(
557
+ *,
558
+ model_pretty_name: str,
559
+ task_pretty_name: str,
560
+ task_id: str,
561
+ metrics_pretty_name: str,
562
+ metrics_id: str,
563
+ metrics_value: Any,
564
+ dataset_pretty_name: str,
565
+ dataset_id: str,
566
+ metrics_config: Optional[str] = None,
567
+ metrics_verified: bool = False,
568
+ dataset_config: Optional[str] = None,
569
+ dataset_split: Optional[str] = None,
570
+ dataset_revision: Optional[str] = None,
571
+ metrics_verification_token: Optional[str] = None,
572
+ ) -> Dict:
573
+ """
574
+ Creates a metadata dict with the result from a model evaluated on a dataset.
575
+
576
+ Args:
577
+ model_pretty_name (`str`):
578
+ The name of the model in natural language.
579
+ task_pretty_name (`str`):
580
+ The name of a task in natural language.
581
+ task_id (`str`):
582
+ Example: automatic-speech-recognition. A task id.
583
+ metrics_pretty_name (`str`):
584
+ A name for the metric in natural language. Example: Test WER.
585
+ metrics_id (`str`):
586
+ Example: wer. A metric id from https://hf.co/metrics.
587
+ metrics_value (`Any`):
588
+ The value from the metric. Example: 20.0 or "20.0 ± 1.2".
589
+ dataset_pretty_name (`str`):
590
+ The name of the dataset in natural language.
591
+ dataset_id (`str`):
592
+ Example: common_voice. A dataset id from https://hf.co/datasets.
593
+ metrics_config (`str`, *optional*):
594
+ The name of the metric configuration used in `load_metric()`.
595
+ Example: bleurt-large-512 in `load_metric("bleurt", "bleurt-large-512")`.
596
+ metrics_verified (`bool`, *optional*, defaults to `False`):
597
+ Indicates whether the metrics originate from Hugging Face's [evaluation service](https://huggingface.co/spaces/autoevaluate/model-evaluator) or not. Automatically computed by Hugging Face, do not set.
598
+ dataset_config (`str`, *optional*):
599
+ Example: fr. The name of the dataset configuration used in `load_dataset()`.
600
+ dataset_split (`str`, *optional*):
601
+ Example: test. The name of the dataset split used in `load_dataset()`.
602
+ dataset_revision (`str`, *optional*):
603
+ Example: 5503434ddd753f426f4b38109466949a1217c2bb. The name of the dataset revision
604
+ used in `load_dataset()`.
605
+ metrics_verification_token (`bool`, *optional*):
606
+ A JSON Web Token that is used to verify whether the metrics originate from Hugging Face's [evaluation service](https://huggingface.co/spaces/autoevaluate/model-evaluator) or not.
607
+
608
+ Returns:
609
+ `dict`: a metadata dict with the result from a model evaluated on a dataset.
610
+
611
+ Example:
612
+ ```python
613
+ >>> from huggingface_hub import metadata_eval_result
614
+ >>> results = metadata_eval_result(
615
+ ... model_pretty_name="RoBERTa fine-tuned on ReactionGIF",
616
+ ... task_pretty_name="Text Classification",
617
+ ... task_id="text-classification",
618
+ ... metrics_pretty_name="Accuracy",
619
+ ... metrics_id="accuracy",
620
+ ... metrics_value=0.2662102282047272,
621
+ ... dataset_pretty_name="ReactionJPEG",
622
+ ... dataset_id="julien-c/reactionjpeg",
623
+ ... dataset_config="default",
624
+ ... dataset_split="test",
625
+ ... )
626
+ >>> results == {
627
+ ... 'model-index': [
628
+ ... {
629
+ ... 'name': 'RoBERTa fine-tuned on ReactionGIF',
630
+ ... 'results': [
631
+ ... {
632
+ ... 'task': {
633
+ ... 'type': 'text-classification',
634
+ ... 'name': 'Text Classification'
635
+ ... },
636
+ ... 'dataset': {
637
+ ... 'name': 'ReactionJPEG',
638
+ ... 'type': 'julien-c/reactionjpeg',
639
+ ... 'config': 'default',
640
+ ... 'split': 'test'
641
+ ... },
642
+ ... 'metrics': [
643
+ ... {
644
+ ... 'type': 'accuracy',
645
+ ... 'value': 0.2662102282047272,
646
+ ... 'name': 'Accuracy',
647
+ ... 'verified': False
648
+ ... }
649
+ ... ]
650
+ ... }
651
+ ... ]
652
+ ... }
653
+ ... ]
654
+ ... }
655
+ True
656
+
657
+ ```
658
+ """
659
+
660
+ return {
661
+ "model-index": eval_results_to_model_index(
662
+ model_name=model_pretty_name,
663
+ eval_results=[
664
+ EvalResult(
665
+ task_name=task_pretty_name,
666
+ task_type=task_id,
667
+ metric_name=metrics_pretty_name,
668
+ metric_type=metrics_id,
669
+ metric_value=metrics_value,
670
+ dataset_name=dataset_pretty_name,
671
+ dataset_type=dataset_id,
672
+ metric_config=metrics_config,
673
+ verified=metrics_verified,
674
+ verify_token=metrics_verification_token,
675
+ dataset_config=dataset_config,
676
+ dataset_split=dataset_split,
677
+ dataset_revision=dataset_revision,
678
+ )
679
+ ],
680
+ )
681
+ }
682
+
683
+
684
+ @validate_hf_hub_args
685
+ def metadata_update(
686
+ repo_id: str,
687
+ metadata: Dict,
688
+ *,
689
+ repo_type: Optional[str] = None,
690
+ overwrite: bool = False,
691
+ token: Optional[str] = None,
692
+ commit_message: Optional[str] = None,
693
+ commit_description: Optional[str] = None,
694
+ revision: Optional[str] = None,
695
+ create_pr: bool = False,
696
+ parent_commit: Optional[str] = None,
697
+ ) -> str:
698
+ """
699
+ Updates the metadata in the README.md of a repository on the Hugging Face Hub.
700
+ If the README.md file doesn't exist yet, a new one is created with the metadata and
701
+ the default ModelCard or DatasetCard template. For `space` repos, an error is thrown
702
+ as a Space cannot exist without a `README.md` file.
703
+
704
+ Args:
705
+ repo_id (`str`):
706
+ The name of the repository.
707
+ metadata (`dict`):
708
+ A dictionary containing the metadata to be updated.
709
+ repo_type (`str`, *optional*):
710
+ Set to `"dataset"` or `"space"` if updating to a dataset or space,
711
+ `None` or `"model"` if updating to a model. Default is `None`.
712
+ overwrite (`bool`, *optional*, defaults to `False`):
713
+ If set to `True` an existing field can be overwritten, otherwise
714
+ attempting to overwrite an existing field will cause an error.
715
+ token (`str`, *optional*):
716
+ The Hugging Face authentication token.
717
+ commit_message (`str`, *optional*):
718
+ The summary / title / first line of the generated commit. Defaults to
719
+ `f"Update metadata with huggingface_hub"`
720
+ commit_description (`str`, *optional*):
721
+ The description of the generated commit
722
+ revision (`str`, *optional*):
723
+ The git revision to commit from. Defaults to the head of the
724
+ `"main"` branch.
725
+ create_pr (`boolean`, *optional*):
726
+ Whether or not to create a Pull Request from `revision` with that commit.
727
+ Defaults to `False`.
728
+ parent_commit (`str`, *optional*):
729
+ The OID / SHA of the parent commit, as a hexadecimal string. Shorthands (7 first characters) are also supported.
730
+ If specified and `create_pr` is `False`, the commit will fail if `revision` does not point to `parent_commit`.
731
+ If specified and `create_pr` is `True`, the pull request will be created from `parent_commit`.
732
+ Specifying `parent_commit` ensures the repo has not changed before committing the changes, and can be
733
+ especially useful if the repo is updated / committed to concurrently.
734
+ Returns:
735
+ `str`: URL of the commit which updated the card metadata.
736
+
737
+ Example:
738
+ ```python
739
+ >>> from huggingface_hub import metadata_update
740
+ >>> metadata = {'model-index': [{'name': 'RoBERTa fine-tuned on ReactionGIF',
741
+ ... 'results': [{'dataset': {'name': 'ReactionGIF',
742
+ ... 'type': 'julien-c/reactiongif'},
743
+ ... 'metrics': [{'name': 'Recall',
744
+ ... 'type': 'recall',
745
+ ... 'value': 0.7762102282047272}],
746
+ ... 'task': {'name': 'Text Classification',
747
+ ... 'type': 'text-classification'}}]}]}
748
+ >>> url = metadata_update("hf-internal-testing/reactiongif-roberta-card", metadata)
749
+
750
+ ```
751
+ """
752
+ commit_message = commit_message if commit_message is not None else "Update metadata with huggingface_hub"
753
+
754
+ # Card class given repo_type
755
+ card_class: Type[RepoCard]
756
+ if repo_type is None or repo_type == "model":
757
+ card_class = ModelCard
758
+ elif repo_type == "dataset":
759
+ card_class = DatasetCard
760
+ elif repo_type == "space":
761
+ card_class = RepoCard
762
+ else:
763
+ raise ValueError(f"Unknown repo_type: {repo_type}")
764
+
765
+ # Either load repo_card from the Hub or create an empty one.
766
+ # NOTE: Will not create the repo if it doesn't exist.
767
+ try:
768
+ card = card_class.load(repo_id, token=token, repo_type=repo_type)
769
+ except EntryNotFoundError:
770
+ if repo_type == "space":
771
+ raise ValueError("Cannot update metadata on a Space that doesn't contain a `README.md` file.")
772
+
773
+ # Initialize a ModelCard or DatasetCard from default template and no data.
774
+ card = card_class.from_template(CardData())
775
+
776
+ for key, value in metadata.items():
777
+ if key == "model-index":
778
+ # if the new metadata doesn't include a name, either use existing one or repo name
779
+ if "name" not in value[0]:
780
+ value[0]["name"] = getattr(card, "model_name", repo_id)
781
+ model_name, new_results = model_index_to_eval_results(value)
782
+ if card.data.eval_results is None:
783
+ card.data.eval_results = new_results
784
+ card.data.model_name = model_name
785
+ else:
786
+ existing_results = card.data.eval_results
787
+
788
+ # Iterate over new results
789
+ # Iterate over existing results
790
+ # If both results describe the same metric but value is different:
791
+ # If overwrite=True: overwrite the metric value
792
+ # Else: raise ValueError
793
+ # Else: append new result to existing ones.
794
+ for new_result in new_results:
795
+ result_found = False
796
+ for existing_result in existing_results:
797
+ if new_result.is_equal_except_value(existing_result):
798
+ if new_result != existing_result and not overwrite:
799
+ raise ValueError(
800
+ "You passed a new value for the existing metric"
801
+ f" 'name: {new_result.metric_name}, type: "
802
+ f"{new_result.metric_type}'. Set `overwrite=True`"
803
+ " to overwrite existing metrics."
804
+ )
805
+ result_found = True
806
+ existing_result.metric_value = new_result.metric_value
807
+ if existing_result.verified is True:
808
+ existing_result.verify_token = new_result.verify_token
809
+ if not result_found:
810
+ card.data.eval_results.append(new_result)
811
+ else:
812
+ # Any metadata that is not a result metric
813
+ if card.data.get(key) is not None and not overwrite and card.data.get(key) != value:
814
+ raise ValueError(
815
+ f"You passed a new value for the existing meta data field '{key}'."
816
+ " Set `overwrite=True` to overwrite existing metadata."
817
+ )
818
+ else:
819
+ card.data[key] = value
820
+
821
+ return card.push_to_hub(
822
+ repo_id,
823
+ token=token,
824
+ repo_type=repo_type,
825
+ commit_message=commit_message,
826
+ commit_description=commit_description,
827
+ create_pr=create_pr,
828
+ revision=revision,
829
+ parent_commit=parent_commit,
830
+ )
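The two metadata helpers compose naturally: `metadata_eval_result` builds a `model-index` payload and `metadata_update` merges it into the card on the Hub, honoring the `overwrite` rules above. A hedged sketch with placeholder names and values throughout:

```python
# Sketch combining metadata_eval_result and metadata_update (defined above).
from huggingface_hub import metadata_eval_result, metadata_update

results = metadata_eval_result(
    model_pretty_name="My Model",           # placeholder
    task_pretty_name="Text Classification",
    task_id="text-classification",
    metrics_pretty_name="Accuracy",
    metrics_id="accuracy",
    metrics_value=0.9,                      # placeholder score
    dataset_pretty_name="My Dataset",
    dataset_id="username/my-dataset",       # placeholder
)
url = metadata_update("username/my-model", results, overwrite=True)
```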
.venv/lib/python3.13/site-packages/huggingface_hub/repository.py ADDED
@@ -0,0 +1,1477 @@
1
+ import atexit
2
+ import os
3
+ import re
4
+ import subprocess
5
+ import threading
6
+ import time
7
+ from contextlib import contextmanager
8
+ from pathlib import Path
9
+ from typing import Callable, Dict, Iterator, List, Optional, Tuple, TypedDict, Union
10
+ from urllib.parse import urlparse
11
+
12
+ from huggingface_hub import constants
13
+ from huggingface_hub.repocard import metadata_load, metadata_save
14
+
15
+ from .hf_api import HfApi, repo_type_and_id_from_hf_id
16
+ from .lfs import LFS_MULTIPART_UPLOAD_COMMAND
17
+ from .utils import (
18
+ SoftTemporaryDirectory,
19
+ get_token,
20
+ logging,
21
+ run_subprocess,
22
+ tqdm,
23
+ validate_hf_hub_args,
24
+ )
25
+ from .utils._deprecation import _deprecate_method
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+
31
+ class CommandInProgress:
32
+ """
33
+ Utility to follow commands launched asynchronously.
34
+ """
35
+
36
+ def __init__(
37
+ self,
38
+ title: str,
39
+ is_done_method: Callable,
40
+ status_method: Callable,
41
+ process: subprocess.Popen,
42
+ post_method: Optional[Callable] = None,
43
+ ):
44
+ self.title = title
45
+ self._is_done = is_done_method
46
+ self._status = status_method
47
+ self._process = process
48
+ self._stderr = ""
49
+ self._stdout = ""
50
+ self._post_method = post_method
51
+
52
+ @property
53
+ def is_done(self) -> bool:
54
+ """
55
+ Whether the process is done.
56
+ """
57
+ result = self._is_done()
58
+
59
+ if result and self._post_method is not None:
60
+ self._post_method()
61
+ self._post_method = None
62
+
63
+ return result
64
+
65
+ @property
66
+ def status(self) -> int:
67
+ """
68
+ The exit code/status of the current action. Will return `0` if the
69
+ command has completed successfully, and a number between 1 and 255 if
70
+ the process errored out.
71
+
72
+ Will return -1 if the command is still ongoing.
73
+ """
74
+ return self._status()
75
+
76
+ @property
77
+ def failed(self) -> bool:
78
+ """
79
+ Whether the process errored out.
80
+ """
81
+ return self.status > 0
82
+
83
+ @property
84
+ def stderr(self) -> str:
85
+ """
86
+ The current output message on the standard error.
87
+ """
88
+ if self._process.stderr is not None:
89
+ self._stderr += self._process.stderr.read()
90
+ return self._stderr
91
+
92
+ @property
93
+ def stdout(self) -> str:
94
+ """
95
+ The current output message on the standard output.
96
+ """
97
+ if self._process.stdout is not None:
98
+ self._stdout += self._process.stdout.read()
99
+ return self._stdout
100
+
101
+ def __repr__(self):
102
+ status = self.status
103
+
104
+ if status == -1:
105
+ status = "running"
106
+
107
+ return (
108
+ f"[{self.title} command, status code: {status},"
109
+ f" {'in progress.' if not self.is_done else 'finished.'} PID:"
110
+ f" {self._process.pid}]"
111
+ )
112
+
113
+
114
+ def is_git_repo(folder: Union[str, Path]) -> bool:
115
+ """
116
+ Check if the folder is the root or part of a git repository
117
+
118
+ Args:
119
+ folder (`str`):
120
+ The folder in which to run the command.
121
+
122
+ Returns:
123
+ `bool`: `True` if the folder is the root of, or part of, a git repository, `False`
124
+ otherwise.
125
+ """
126
+ folder_exists = os.path.exists(os.path.join(folder, ".git"))
127
+ git_branch = subprocess.run("git branch".split(), cwd=folder, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
128
+ return folder_exists and git_branch.returncode == 0
129
+
130
+
131
+ def is_local_clone(folder: Union[str, Path], remote_url: str) -> bool:
132
+ """
133
+ Check if the folder is a local clone of the remote_url
134
+
135
+ Args:
136
+ folder (`str` or `Path`):
137
+ The folder in which to run the command.
138
+ remote_url (`str`):
139
+ The url of a git repository.
140
+
141
+ Returns:
142
+ `bool`: `True` if the repository is a local clone of the remote
143
+ repository specified, `False` otherwise.
144
+ """
145
+ if not is_git_repo(folder):
146
+ return False
147
+
148
+ remotes = run_subprocess("git remote -v", folder).stdout
149
+
150
+ # Remove token for the test with remotes.
151
+ remote_url = re.sub(r"https://.*@", "https://", remote_url)
152
+ remotes = [re.sub(r"https://.*@", "https://", remote) for remote in remotes.split()]
153
+ return remote_url in remotes
154
+
155
+
156
+ def is_tracked_with_lfs(filename: Union[str, Path]) -> bool:
157
+ """
158
+ Check if the file passed is tracked with git-lfs.
159
+
160
+ Args:
161
+ filename (`str` or `Path`):
162
+ The filename to check.
163
+
164
+ Returns:
165
+ `bool`: `True` if the file passed is tracked with git-lfs, `False`
166
+ otherwise.
167
+ """
168
+ folder = Path(filename).parent
169
+ filename = Path(filename).name
170
+
171
+ try:
172
+ p = run_subprocess("git check-attr -a".split() + [filename], folder)
173
+ attributes = p.stdout.strip()
174
+ except subprocess.CalledProcessError as exc:
175
+ if not is_git_repo(folder):
176
+ return False
177
+ else:
178
+ raise OSError(exc.stderr)
179
+
180
+ if len(attributes) == 0:
181
+ return False
182
+
183
+ found_lfs_tag = {"diff": False, "merge": False, "filter": False}
184
+
185
+ for attribute in attributes.split("\n"):
186
+ for tag in found_lfs_tag.keys():
187
+ if tag in attribute and "lfs" in attribute:
188
+ found_lfs_tag[tag] = True
189
+
190
+ return all(found_lfs_tag.values())
191
+
192
+
193
+ def is_git_ignored(filename: Union[str, Path]) -> bool:
194
+ """
195
+ Check if file is git-ignored. Supports nested .gitignore files.
196
+
197
+ Args:
198
+ filename (`str` or `Path`):
199
+ The filename to check.
200
+
201
+ Returns:
202
+ `bool`: `True` if the file passed is ignored by `git`, `False`
203
+ otherwise.
204
+ """
205
+ folder = Path(filename).parent
206
+ filename = Path(filename).name
207
+
208
+ try:
209
+ p = run_subprocess("git check-ignore".split() + [filename], folder, check=False)
210
+ # Will return exit code 1 if not gitignored
211
+ is_ignored = not bool(p.returncode)
212
+ except subprocess.CalledProcessError as exc:
213
+ raise OSError(exc.stderr)
214
+
215
+ return is_ignored
216
+
217
+
218
+ def is_binary_file(filename: Union[str, Path]) -> bool:
219
+ """
220
+ Check if file is a binary file.
221
+
222
+ Args:
223
+ filename (`str` or `Path`):
224
+ The filename to check.
225
+
226
+ Returns:
227
+ `bool`: `True` if the file passed is a binary file, `False` otherwise.
228
+ """
229
+ try:
230
+ with open(filename, "rb") as f:
231
+ content = f.read(10 * (1024**2)) # Read a maximum of 10MB
232
+
233
+ # Code sample taken from the following stack overflow thread
234
+ # https://stackoverflow.com/questions/898669/how-can-i-detect-if-a-file-is-binary-non-text-in-python/7392391#7392391
235
+ text_chars = bytearray({7, 8, 9, 10, 12, 13, 27} | set(range(0x20, 0x100)) - {0x7F})
236
+ return bool(content.translate(None, text_chars))
237
+ except UnicodeDecodeError:
238
+ return True
239
+
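As a standalone illustration of the binary-detection heuristic above (the same `text_chars` table, applied to in-memory bytes instead of a file):

```python
# Sketch of the binary-detection heuristic used by is_binary_file.
text_chars = bytearray({7, 8, 9, 10, 12, 13, 27} | set(range(0x20, 0x100)) - {0x7F})

def looks_binary(content: bytes) -> bool:
    # translate(None, text_chars) deletes every "texty" byte;
    # any leftover bytes (e.g. NUL) suggest binary data.
    return bool(content.translate(None, text_chars))

assert looks_binary(b"plain ascii text\n") is False
assert looks_binary(b"\x00\x01\x02binary header") is True
```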
240
+
241
+ def files_to_be_staged(pattern: str = ".", folder: Union[str, Path, None] = None) -> List[str]:
242
+ """
243
+ Returns a list of filenames that are to be staged.
244
+
245
+ Args:
246
+ pattern (`str` or `Path`):
247
+ The pattern of filenames to check. Put `.` to get all files.
248
+ folder (`str` or `Path`):
249
+ The folder in which to run the command.
250
+
251
+ Returns:
252
+ `List[str]`: List of files that are to be staged.
253
+ """
254
+ try:
255
+ p = run_subprocess("git ls-files --exclude-standard -mo".split() + [pattern], folder)
256
+ if len(p.stdout.strip()):
257
+ files = p.stdout.strip().split("\n")
258
+ else:
259
+ files = []
260
+ except subprocess.CalledProcessError as exc:
261
+ raise EnvironmentError(exc.stderr)
262
+
263
+ return files
264
+
265
+
266
+ def is_tracked_upstream(folder: Union[str, Path]) -> bool:
267
+ """
268
+ Check if the current checked-out branch is tracked upstream.
269
+
270
+ Args:
271
+ folder (`str` or `Path`):
272
+ The folder in which to run the command.
273
+
274
+ Returns:
275
+ `bool`: `True` if the current checked-out branch is tracked upstream,
276
+ `False` otherwise.
277
+ """
278
+ try:
279
+ run_subprocess("git rev-parse --symbolic-full-name --abbrev-ref @{u}", folder)
280
+ return True
281
+ except subprocess.CalledProcessError as exc:
282
+ if "HEAD" in exc.stderr:
283
+ raise OSError("No branch checked out")
284
+
285
+ return False
286
+
287
+
288
+ def commits_to_push(folder: Union[str, Path], upstream: Optional[str] = None) -> int:
289
+ """
290
+ Check the number of commits that would be pushed upstream
291
+
292
+ Args:
293
+ folder (`str` or `Path`):
294
+ The folder in which to run the command.
295
+ upstream (`str`, *optional*):
296
+ The name of the upstream repository with which the comparison should be
297
+ made.
298
+
299
+ Returns:
300
+ `int`: Number of commits that would be pushed upstream were a `git
301
+ push` to proceed.
302
+ """
303
+ try:
304
+ result = run_subprocess(f"git cherry -v {upstream or ''}", folder)
305
+ return len(result.stdout.split("\n")) - 1
306
+ except subprocess.CalledProcessError as exc:
307
+ raise EnvironmentError(exc.stderr)
308
+
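As a quick illustration, the module-level helpers above can be combined to inspect a local clone before pushing; the folder path is hypothetical:

```python
from huggingface_hub.repository import (
    commits_to_push,
    is_git_repo,
    is_tracked_upstream,
)

folder = "my-model"  # hypothetical local clone
if is_git_repo(folder) and is_tracked_upstream(folder):
    print(f"{commits_to_push(folder)} commit(s) would be pushed upstream")
```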
309
+
310
+ class PbarT(TypedDict):
311
+ # Used to store an opened progress bar in `_lfs_log_progress`
312
+ bar: tqdm
313
+ past_bytes: int
314
+
315
+
316
+ @contextmanager
317
+ def _lfs_log_progress():
318
+ """
319
+ This is a context manager that will log the Git LFS progress of cleaning,
320
+ smudging, pulling and pushing.
321
+ """
322
+
323
+ if logger.getEffectiveLevel() >= logging.ERROR:
324
+ try:
325
+ yield
326
+ except Exception:
327
+ pass
328
+ return
329
+
330
+ def output_progress(stopping_event: threading.Event):
331
+ """
332
+ To be launched as a separate thread with an event meaning it should stop
333
+ the tail.
334
+ """
335
+ # Key is tuple(state, filename), value is a dict(tqdm bar and a previous value)
336
+ pbars: Dict[Tuple[str, str], PbarT] = {}
337
+
338
+ def close_pbars():
339
+ for pbar in pbars.values():
340
+ pbar["bar"].update(pbar["bar"].total - pbar["past_bytes"])
341
+ pbar["bar"].refresh()
342
+ pbar["bar"].close()
343
+
344
+ def tail_file(filename) -> Iterator[str]:
345
+ """
346
+ Creates a generator to be iterated through, which will return each
347
+ line one by one. Will stop tailing the file if the stopping_event is
348
+ set.
349
+ """
350
+ with open(filename, "r") as file:
351
+ current_line = ""
352
+ while True:
353
+ if stopping_event.is_set():
354
+ close_pbars()
355
+ break
356
+
357
+ line_bit = file.readline()
358
+ if line_bit is not None and not len(line_bit.strip()) == 0:
359
+ current_line += line_bit
360
+ if current_line.endswith("\n"):
361
+ yield current_line
362
+ current_line = ""
363
+ else:
364
+ time.sleep(1)
365
+
366
+ # If the file isn't created yet, wait for a few seconds before trying again.
367
+ # Can be interrupted with the stopping_event.
368
+ while not os.path.exists(os.environ["GIT_LFS_PROGRESS"]):
369
+ if stopping_event.is_set():
370
+ close_pbars()
371
+ return
372
+
373
+ time.sleep(2)
374
+
375
+ for line in tail_file(os.environ["GIT_LFS_PROGRESS"]):
376
+ try:
377
+ state, file_progress, byte_progress, filename = line.split()
378
+ except ValueError as error:
379
+ # Try/except to ease debugging. See https://github.com/huggingface/huggingface_hub/issues/1373.
380
+ raise ValueError(f"Cannot unpack LFS progress line:\n{line}") from error
381
+ description = f"{state.capitalize()} file {filename}"
382
+
383
+ current_bytes, total_bytes = byte_progress.split("/")
384
+ current_bytes_int = int(current_bytes)
385
+ total_bytes_int = int(total_bytes)
386
+
387
+ pbar = pbars.get((state, filename))
388
+ if pbar is None:
389
+ # Initialize progress bar
390
+ pbars[(state, filename)] = {
391
+ "bar": tqdm(
392
+ desc=description,
393
+ initial=current_bytes_int,
394
+ total=total_bytes_int,
395
+ unit="B",
396
+ unit_scale=True,
397
+ unit_divisor=1024,
398
+ name="huggingface_hub.lfs_upload",
399
+ ),
400
+ "past_bytes": int(current_bytes),
401
+ }
402
+ else:
403
+ # Update progress bar
404
+ pbar["bar"].update(current_bytes_int - pbar["past_bytes"])
405
+ pbar["past_bytes"] = current_bytes_int
406
+
407
+ current_lfs_progress_value = os.environ.get("GIT_LFS_PROGRESS", "")
408
+
409
+ with SoftTemporaryDirectory() as tmpdir:
410
+ os.environ["GIT_LFS_PROGRESS"] = os.path.join(tmpdir, "lfs_progress")
411
+ logger.debug(f"Following progress in {os.environ['GIT_LFS_PROGRESS']}")
412
+
413
+ exit_event = threading.Event()
414
+ x = threading.Thread(target=output_progress, args=(exit_event,), daemon=True)
415
+ x.start()
416
+
417
+ try:
418
+ yield
419
+ finally:
420
+ exit_event.set()
421
+ x.join()
422
+
423
+ os.environ["GIT_LFS_PROGRESS"] = current_lfs_progress_value
424
+
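Each line of the `GIT_LFS_PROGRESS` file has the shape `<state> <file_progress> <byte_progress> <filename>`; a minimal sketch of the parse performed in `output_progress` above, on a made-up sample line:

```python
# Hypothetical sample line in the GIT_LFS_PROGRESS format parsed above.
line = "download 1/3 524288/1048576 model.safetensors"

state, file_progress, byte_progress, filename = line.split()
current_bytes, total_bytes = byte_progress.split("/")
print(f"{state.capitalize()} file {filename}: {current_bytes}/{total_bytes} bytes")
```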
425
+
426
+ class Repository:
427
+ """
428
+ Helper class to wrap the git and git-lfs commands.
429
+
430
+ The aim is to facilitate interacting with huggingface.co hosted model or
431
+ dataset repos, though not a lot here (if any) is actually specific to
432
+ huggingface.co.
433
+
434
+ <Tip warning={true}>
435
+
436
+ [`Repository`] is deprecated in favor of the http-based alternatives implemented in
437
+ [`HfApi`]. Given its large adoption in legacy code, the complete removal of
438
+ [`Repository`] will only happen in release `v1.0`. For more details, please read
439
+ https://huggingface.co/docs/huggingface_hub/concepts/git_vs_http.
440
+
441
+ </Tip>
442
+ """
443
+
444
+ command_queue: List[CommandInProgress]
445
+
446
+ @validate_hf_hub_args
447
+ @_deprecate_method(
448
+ version="1.0",
449
+ message=(
450
+ "Please prefer the http-based alternatives instead. Given its large adoption in legacy code, the complete"
451
+ " removal is only planned on next major release.\nFor more details, please read"
452
+ " https://huggingface.co/docs/huggingface_hub/concepts/git_vs_http."
453
+ ),
454
+ )
455
+ def __init__(
456
+ self,
457
+ local_dir: Union[str, Path],
458
+ clone_from: Optional[str] = None,
459
+ repo_type: Optional[str] = None,
460
+ token: Union[bool, str] = True,
461
+ git_user: Optional[str] = None,
462
+ git_email: Optional[str] = None,
463
+ revision: Optional[str] = None,
464
+ skip_lfs_files: bool = False,
465
+ client: Optional[HfApi] = None,
466
+ ):
467
+ """
468
+ Instantiate a local clone of a git repo.
469
+
470
+ If `clone_from` is set, the repo will be cloned from an existing remote repository.
471
+ If the remote repo does not exist, an `EnvironmentError` exception will be raised.
472
+ Please create the remote repo first using [`create_repo`].
473
+
474
+ `Repository` uses the local git credentials by default. If explicitly set, the `token`
475
+ or the `git_user`/`git_email` pair will be used instead.
476
+
477
+ Args:
478
+ local_dir (`str` or `Path`):
479
+ path (e.g. `'my_trained_model/'`) to the local directory, where
480
+ the `Repository` will be initialized.
481
+ clone_from (`str`, *optional*):
482
+ Either a repository url or `repo_id`.
483
+ Example:
484
+ - `"https://huggingface.co/philschmid/playground-tests"`
485
+ - `"philschmid/playground-tests"`
486
+ repo_type (`str`, *optional*):
487
+ To set when cloning a repo from a repo_id. Default is model.
488
+ token (`bool` or `str`, *optional*):
489
+ A valid authentication token (see https://huggingface.co/settings/token).
490
+ If `None` or `True` and machine is logged in (through `huggingface-cli login`
491
+ or [`~huggingface_hub.login`]), token will be retrieved from the cache.
492
+ If `False`, token is not sent in the request header.
493
+ git_user (`str`, *optional*):
494
+ will override the `git config user.name` for committing and
495
+ pushing files to the hub.
496
+ git_email (`str`, *optional*):
497
+ will override the `git config user.email` for committing and
498
+ pushing files to the hub.
499
+ revision (`str`, *optional*):
500
+ Revision to checkout after initializing the repository. If the
501
+ revision doesn't exist, a branch will be created with that
502
+ revision name from the default branch's current HEAD.
503
+ skip_lfs_files (`bool`, *optional*, defaults to `False`):
504
+ whether to skip git-LFS files or not.
505
+ client (`HfApi`, *optional*):
506
+ Instance of [`HfApi`] to use when calling the HF Hub API. A new
507
+ instance will be created if this is left to `None`.
508
+
509
+ Raises:
510
+ [`EnvironmentError`](https://docs.python.org/3/library/exceptions.html#EnvironmentError)
511
+ If the remote repository set in `clone_from` does not exist.
512
+ """
513
+ if isinstance(local_dir, Path):
514
+ local_dir = str(local_dir)
515
+ os.makedirs(local_dir, exist_ok=True)
516
+ self.local_dir = os.path.join(os.getcwd(), local_dir)
517
+ self._repo_type = repo_type
518
+ self.command_queue = []
519
+ self.skip_lfs_files = skip_lfs_files
520
+ self.client = client if client is not None else HfApi()
521
+
522
+ self.check_git_versions()
523
+
524
+ if isinstance(token, str):
525
+ self.huggingface_token: Optional[str] = token
526
+ elif token is False:
527
+ self.huggingface_token = None
528
+ else:
529
+ # if `True` -> explicit use of the cached token
530
+ # if `None` -> implicit use of the cached token
531
+ self.huggingface_token = get_token()
532
+
533
+ if clone_from is not None:
534
+ self.clone_from(repo_url=clone_from)
535
+ else:
536
+ if is_git_repo(self.local_dir):
537
+ logger.debug("[Repository] is a valid git repo")
538
+ else:
539
+ raise ValueError("If not specifying `clone_from`, you need to pass Repository a valid git clone.")
540
+
541
+ if self.huggingface_token is not None and (git_email is None or git_user is None):
542
+ user = self.client.whoami(self.huggingface_token)
543
+
544
+ if git_email is None:
545
+ git_email = user.get("email")
546
+
547
+ if git_user is None:
548
+ git_user = user.get("fullname")
549
+
550
+ if git_user is not None or git_email is not None:
551
+ self.git_config_username_and_email(git_user, git_email)
552
+
553
+ self.lfs_enable_largefiles()
554
+ self.git_credential_helper_store()
555
+
556
+ if revision is not None:
557
+ self.git_checkout(revision, create_branch_ok=True)
558
+
559
+ # This ensures that all commands exit before exiting the Python runtime.
560
+ # This will ensure all pushes register on the hub, even if other errors happen in subsequent operations.
561
+ atexit.register(self.wait_for_commands)
562
+
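Although `Repository` is deprecated in favor of the HTTP-based `HfApi` methods, a minimal instantiation sketch for reference; the repo id is hypothetical and the remote repo is assumed to already exist (see `create_repo`):

```python
from huggingface_hub import Repository

# Hypothetical repo id; the remote repo must already exist on the Hub.
repo = Repository(local_dir="my-model", clone_from="my-user/my-model")
repo.git_pull()
```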
563
+ @property
564
+ def current_branch(self) -> str:
565
+ """
566
+ Returns the current checked out branch.
567
+
568
+ Returns:
569
+ `str`: Current checked out branch.
570
+ """
571
+ try:
572
+ result = run_subprocess("git rev-parse --abbrev-ref HEAD", self.local_dir).stdout.strip()
573
+ except subprocess.CalledProcessError as exc:
574
+ raise EnvironmentError(exc.stderr)
575
+
576
+ return result
577
+
578
+ def check_git_versions(self):
579
+ """
580
+ Checks that `git` and `git-lfs` can be run.
581
+
582
+ Raises:
583
+ [`EnvironmentError`](https://docs.python.org/3/library/exceptions.html#EnvironmentError)
584
+ If `git` or `git-lfs` are not installed.
585
+ """
586
+ try:
587
+ git_version = run_subprocess("git --version", self.local_dir).stdout.strip()
588
+ except FileNotFoundError:
589
+ raise EnvironmentError("Looks like you do not have git installed, please install.")
590
+
591
+ try:
592
+ lfs_version = run_subprocess("git-lfs --version", self.local_dir).stdout.strip()
593
+ except FileNotFoundError:
594
+ raise EnvironmentError(
595
+ "Looks like you do not have git-lfs installed, please install."
596
+ " You can install from https://git-lfs.github.com/."
597
+ " Then run `git lfs install` (you only have to do this once)."
598
+ )
599
+ logger.info(git_version + "\n" + lfs_version)
600
+
601
+ @validate_hf_hub_args
602
+ def clone_from(self, repo_url: str, token: Union[bool, str, None] = None):
603
+ """
604
+ Clone from a remote. If the folder already exists, will try to clone the
605
+ repository within it.
606
+
607
+ If this folder is a git repository with linked history, will try to
608
+ update the repository.
609
+
610
+ Args:
611
+ repo_url (`str`):
612
+ The URL from which to clone the repository
613
+ token (`Union[str, bool]`, *optional*):
614
+ Whether to use the authentication token. It can be:
615
+ - a string which is the token itself
616
+ - `False`, which would not use the authentication token
617
+ - `True`, which would fetch the authentication token from the
618
+ local folder and use it (you should be logged in for this to
619
+ work).
620
+ - `None`, which would retrieve the value of
621
+ `self.huggingface_token`.
622
+
623
+ <Tip>
624
+
625
+ Raises the following error:
626
+
627
+ - [`ValueError`](https://docs.python.org/3/library/exceptions.html#ValueError)
628
+ if an organization token (starts with "api_org") is passed. You must use
629
+ your own personal access token (see https://hf.co/settings/tokens).
630
+
631
+ - [`EnvironmentError`](https://docs.python.org/3/library/exceptions.html#EnvironmentError)
632
+ if you are trying to clone the repository in a non-empty folder, or if the
633
+ `git` operations raise errors.
634
+
635
+ </Tip>
636
+ """
637
+ token = (
638
+ token # str -> use it
639
+ if isinstance(token, str)
640
+ else (
641
+ None # `False` -> explicit no token
642
+ if token is False
643
+ else self.huggingface_token # `None` or `True` -> use default
644
+ )
645
+ )
646
+ if token is not None and token.startswith("api_org"):
647
+ raise ValueError(
648
+ "You must use your personal access token, not an Organization token"
649
+ " (see https://hf.co/settings/tokens)."
650
+ )
651
+
652
+ hub_url = self.client.endpoint
653
+ if hub_url in repo_url or ("http" not in repo_url and len(repo_url.split("/")) <= 2):
654
+ repo_type, namespace, repo_name = repo_type_and_id_from_hf_id(repo_url, hub_url=hub_url)
655
+ repo_id = f"{namespace}/{repo_name}" if namespace is not None else repo_name
656
+
657
+ if repo_type is not None:
658
+ self._repo_type = repo_type
659
+
660
+ repo_url = hub_url + "/"
661
+
662
+ if self._repo_type in constants.REPO_TYPES_URL_PREFIXES:
663
+ repo_url += constants.REPO_TYPES_URL_PREFIXES[self._repo_type]
664
+
665
+ if token is not None:
666
+ # Add token in git url when provided
667
+ scheme = urlparse(repo_url).scheme
668
+ repo_url = repo_url.replace(f"{scheme}://", f"{scheme}://user:{token}@")
669
+
670
+ repo_url += repo_id
671
+
672
+ # For error messages, it's cleaner to show the repo url without the token.
673
+ clean_repo_url = re.sub(r"(https?)://.*@", r"\1://", repo_url)
674
+ try:
675
+ run_subprocess("git lfs install", self.local_dir)
676
+
677
+ # Check whether the repository is being initialized in an empty directory or in one with existing files
678
+ if len(os.listdir(self.local_dir)) == 0:
679
+ logger.warning(f"Cloning {clean_repo_url} into local empty directory.")
680
+
681
+ with _lfs_log_progress():
682
+ env = os.environ.copy()
683
+
684
+ if self.skip_lfs_files:
685
+ env.update({"GIT_LFS_SKIP_SMUDGE": "1"})
686
+
687
+ run_subprocess(
688
+ # 'git lfs clone' is deprecated (will display a warning in the terminal)
689
+ # but we still use it as it provides a nicer UX when downloading large
690
+ # files (shows progress).
691
+ f"{'git clone' if self.skip_lfs_files else 'git lfs clone'} {repo_url} .",
692
+ self.local_dir,
693
+ env=env,
694
+ )
695
+ else:
696
+ # Check if the folder is the root of a git repository
697
+ if not is_git_repo(self.local_dir):
698
+ raise EnvironmentError(
699
+ "Tried to clone a repository in a non-empty folder that isn't"
700
+ f" a git repository ('{self.local_dir}'). If you really want to"
701
+ f" do this, do it manually:\n cd {self.local_dir} && git init"
702
+ " && git remote add origin && git pull origin main\n or clone"
703
+ " repo to a new folder and move your existing files there"
704
+ " afterwards."
705
+ )
706
+
707
+ if is_local_clone(self.local_dir, repo_url):
708
+ logger.warning(
709
+ f"{self.local_dir} is already a clone of {clean_repo_url}."
710
+ " Make sure you pull the latest changes with"
711
+ " `repo.git_pull()`."
712
+ )
713
+ else:
714
+ output = run_subprocess("git remote get-url origin", self.local_dir, check=False)
715
+
716
+ error_msg = (
717
+ f"Tried to clone {clean_repo_url} in an unrelated git"
718
+ " repository.\nIf you believe this is an error, please add"
719
+ f" a remote with the following URL: {clean_repo_url}."
720
+ )
721
+ if output.returncode == 0:
722
+ clean_local_remote_url = re.sub(r"https://.*@", "https://", output.stdout)
723
+ error_msg += f"\nLocal path has its origin defined as: {clean_local_remote_url}"
724
+ raise EnvironmentError(error_msg)
725
+
726
+ except subprocess.CalledProcessError as exc:
727
+ raise EnvironmentError(exc.stderr)
728
+
729
+ def git_config_username_and_email(self, git_user: Optional[str] = None, git_email: Optional[str] = None):
730
+ """
731
+ Sets git username and email (only in the current repo).
732
+
733
+ Args:
734
+ git_user (`str`, *optional*):
735
+ The username to register through `git`.
736
+ git_email (`str`, *optional*):
737
+ The email to register through `git`.
738
+ """
739
+ try:
740
+ if git_user is not None:
741
+ run_subprocess("git config user.name".split() + [git_user], self.local_dir)
742
+
743
+ if git_email is not None:
744
+ run_subprocess(f"git config user.email {git_email}".split(), self.local_dir)
745
+ except subprocess.CalledProcessError as exc:
746
+ raise EnvironmentError(exc.stderr)
747
+
748
+ def git_credential_helper_store(self):
749
+ """
750
+ Sets the git credential helper to `store`
751
+ """
752
+ try:
753
+ run_subprocess("git config credential.helper store", self.local_dir)
754
+ except subprocess.CalledProcessError as exc:
755
+ raise EnvironmentError(exc.stderr)
756
+
757
+ def git_head_hash(self) -> str:
758
+ """
759
+ Get commit sha on top of HEAD.
760
+
761
+ Returns:
762
+ `str`: The current checked out commit SHA.
763
+ """
764
+ try:
765
+ p = run_subprocess("git rev-parse HEAD", self.local_dir)
766
+ return p.stdout.strip()
767
+ except subprocess.CalledProcessError as exc:
768
+ raise EnvironmentError(exc.stderr)
769
+
770
+ def git_remote_url(self) -> str:
771
+ """
772
+ Get URL to origin remote.
773
+
774
+ Returns:
775
+ `str`: The URL of the `origin` remote.
776
+ """
777
+ try:
778
+ p = run_subprocess("git config --get remote.origin.url", self.local_dir)
779
+ url = p.stdout.strip()
780
+ # Strip basic auth info.
781
+ return re.sub(r"https://.*@", "https://", url)
782
+ except subprocess.CalledProcessError as exc:
783
+ raise EnvironmentError(exc.stderr)
784
+
785
+ def git_head_commit_url(self) -> str:
786
+ """
787
+ Get URL to last commit on HEAD. We assume it's been pushed, and the url
788
+ scheme is the same one as for GitHub or HuggingFace.
789
+
790
+ Returns:
791
+ `str`: The URL to the current checked-out commit.
792
+ """
793
+ sha = self.git_head_hash()
794
+ url = self.git_remote_url()
795
+ if url.endswith("/"):
796
+ url = url[:-1]
797
+ return f"{url}/commit/{sha}"
798
+
799
+ def list_deleted_files(self) -> List[str]:
800
+ """
801
+ Returns a list of the files that are deleted in the working directory or
802
+ index.
803
+
804
+ Returns:
805
+ `List[str]`: A list of files that have been deleted in the working
806
+ directory or index.
807
+ """
808
+ try:
809
+ git_status = run_subprocess("git status -s", self.local_dir).stdout.strip()
810
+ except subprocess.CalledProcessError as exc:
811
+ raise EnvironmentError(exc.stderr)
812
+
813
+ if len(git_status) == 0:
814
+ return []
815
+
816
+ # Receives a status like the following
817
+ # D .gitignore
818
+ # D new_file.json
819
+ # AD new_file1.json
820
+ # ?? new_file2.json
821
+ # ?? new_file4.json
822
+
823
+ # Strip each line of whitespaces
824
+ modified_files_statuses = [status.strip() for status in git_status.split("\n")]
825
+
826
+ # Only keep files that are deleted using the D prefix
827
+ deleted_files_statuses = [status for status in modified_files_statuses if "D" in status.split()[0]]
828
+
829
+ # Remove the D prefix and strip to keep only the relevant filename
830
+ deleted_files = [status.split()[-1].strip() for status in deleted_files_statuses]
831
+
832
+ return deleted_files
833
+
834
+ def lfs_track(self, patterns: Union[str, List[str]], filename: bool = False):
835
+ """
836
+ Tell git-lfs to track files according to a pattern.
837
+
838
+ Setting the `filename` argument to `True` will treat the arguments as
839
+ literal filenames, not as patterns. Any special glob characters in the
840
+ filename will be escaped when writing to the `.gitattributes` file.
841
+
842
+ Args:
843
+ patterns (`Union[str, List[str]]`):
844
+ The pattern, or list of patterns, to track with git-lfs.
845
+ filename (`bool`, *optional*, defaults to `False`):
846
+ Whether to use the patterns as literal filenames.
847
+ """
848
+ if isinstance(patterns, str):
849
+ patterns = [patterns]
850
+ try:
851
+ for pattern in patterns:
852
+ run_subprocess(
853
+ f"git lfs track {'--filename' if filename else ''} {pattern}",
854
+ self.local_dir,
855
+ )
856
+ except subprocess.CalledProcessError as exc:
857
+ raise EnvironmentError(exc.stderr)
858
+
859
+ def lfs_untrack(self, patterns: Union[str, List[str]]):
860
+ """
861
+ Tell git-lfs to untrack those files.
862
+
863
+ Args:
864
+ patterns (`Union[str, List[str]]`):
865
+ The pattern, or list of patterns, to untrack with git-lfs.
866
+ """
867
+ if isinstance(patterns, str):
868
+ patterns = [patterns]
869
+ try:
870
+ for pattern in patterns:
871
+ run_subprocess("git lfs untrack".split() + [pattern], self.local_dir)
872
+ except subprocess.CalledProcessError as exc:
873
+ raise EnvironmentError(exc.stderr)
874
+
875
+ def lfs_enable_largefiles(self):
876
+ """
877
+ HF-specific. This enables upload support of files >5GB.
878
+ """
879
+ try:
880
+ lfs_config = "git config lfs.customtransfer.multipart"
881
+ run_subprocess(f"{lfs_config}.path huggingface-cli", self.local_dir)
882
+ run_subprocess(
883
+ f"{lfs_config}.args {LFS_MULTIPART_UPLOAD_COMMAND}",
884
+ self.local_dir,
885
+ )
886
+ except subprocess.CalledProcessError as exc:
887
+ raise EnvironmentError(exc.stderr)
888
+
889
+ def auto_track_binary_files(self, pattern: str = ".") -> List[str]:
890
+ """
891
+ Automatically track binary files with git-lfs.
892
+
893
+ Args:
894
+ pattern (`str`, *optional*, defaults to "."):
895
+ The pattern with which to track files that are binary.
896
+
897
+ Returns:
898
+ `List[str]`: List of filenames that are now tracked due to being
899
+ binary files
900
+ """
901
+ files_to_be_tracked_with_lfs = []
902
+
903
+ deleted_files = self.list_deleted_files()
904
+
905
+ for filename in files_to_be_staged(pattern, folder=self.local_dir):
906
+ if filename in deleted_files:
907
+ continue
908
+
909
+ path_to_file = os.path.join(os.getcwd(), self.local_dir, filename)
910
+
911
+ if not (is_tracked_with_lfs(path_to_file) or is_git_ignored(path_to_file)):
912
+ size_in_mb = os.path.getsize(path_to_file) / (1024 * 1024)
913
+
914
+ if size_in_mb >= 10:
915
+ logger.warning(
916
+ "Parsing a large file to check if binary or not. Tracking large"
917
+ " files using `repository.auto_track_large_files` is"
918
+ " recommended so as to not load the full file in memory."
919
+ )
920
+
921
+ is_binary = is_binary_file(path_to_file)
922
+
923
+ if is_binary:
924
+ self.lfs_track(filename)
925
+ files_to_be_tracked_with_lfs.append(filename)
926
+
927
+ # Cleanup the .gitattributes if files were deleted
928
+ self.lfs_untrack(deleted_files)
929
+
930
+ return files_to_be_tracked_with_lfs
931
+
932
+ def auto_track_large_files(self, pattern: str = ".") -> List[str]:
933
+ """
934
+ Automatically track large files (files larger than 10MB) with
935
+ git-lfs.
936
+
937
+ Args:
938
+ pattern (`str`, *optional*, defaults to "."):
939
+ The pattern with which to track files that are above 10MB.
940
+
941
+ Returns:
942
+ `List[str]`: List of filenames that are now tracked due to their
943
+ size.
944
+ """
945
+ files_to_be_tracked_with_lfs = []
946
+
947
+ deleted_files = self.list_deleted_files()
948
+
949
+ for filename in files_to_be_staged(pattern, folder=self.local_dir):
950
+ if filename in deleted_files:
951
+ continue
952
+
953
+ path_to_file = os.path.join(os.getcwd(), self.local_dir, filename)
954
+ size_in_mb = os.path.getsize(path_to_file) / (1024 * 1024)
955
+
956
+ if size_in_mb >= 10 and not is_tracked_with_lfs(path_to_file) and not is_git_ignored(path_to_file):
957
+ self.lfs_track(filename)
958
+ files_to_be_tracked_with_lfs.append(filename)
959
+
960
+ # Cleanup the .gitattributes if files were deleted
961
+ self.lfs_untrack(deleted_files)
962
+
963
+ return files_to_be_tracked_with_lfs
964
+
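A sketch of the two auto-tracking helpers in use, continuing from the hypothetical `repo` instantiated earlier:

```python
# Track anything >=10MB first, then any remaining binary files.
tracked = repo.auto_track_large_files()
tracked += repo.auto_track_binary_files()
print("Now tracked with git-lfs:", tracked)
```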
965
+ def lfs_prune(self, recent=False):
966
+ """
967
+ git lfs prune
968
+
969
+ Args:
970
+ recent (`bool`, *optional*, defaults to `False`):
971
+ Whether to prune files even if they were referenced by recent
972
+ commits. See the following
973
+ [link](https://github.com/git-lfs/git-lfs/blob/f3d43f0428a84fc4f1e5405b76b5a73ec2437e65/docs/man/git-lfs-prune.1.ronn#recent-files)
974
+ for more information.
975
+ """
976
+ try:
977
+ with _lfs_log_progress():
978
+ result = run_subprocess(f"git lfs prune {'--recent' if recent else ''}", self.local_dir)
979
+ logger.info(result.stdout)
980
+ except subprocess.CalledProcessError as exc:
981
+ raise EnvironmentError(exc.stderr)
982
+
983
+ def git_pull(self, rebase: bool = False, lfs: bool = False):
984
+ """
985
+ git pull
986
+
987
+ Args:
988
+ rebase (`bool`, *optional*, defaults to `False`):
989
+ Whether to rebase the current branch on top of the upstream
990
+ branch after fetching.
991
+ lfs (`bool`, *optional*, defaults to `False`):
992
+ Whether to fetch the LFS files too. This option only changes the
993
+ behavior when a repository was cloned without fetching the LFS
994
+ files; calling `repo.git_pull(lfs=True)` will then fetch the LFS
995
+ file from the remote repository.
996
+ """
997
+ command = "git pull" if not lfs else "git lfs pull"
998
+ if rebase:
999
+ command += " --rebase"
1000
+ try:
1001
+ with _lfs_log_progress():
1002
+ result = run_subprocess(command, self.local_dir)
1003
+ logger.info(result.stdout)
1004
+ except subprocess.CalledProcessError as exc:
1005
+ raise EnvironmentError(exc.stderr)
1006
+
1007
+ def git_add(self, pattern: str = ".", auto_lfs_track: bool = False):
1008
+ """
1009
+ git add
1010
+
1011
+ Setting the `auto_lfs_track` parameter to `True` will automatically
1012
+ track files that are larger than 10MB with `git-lfs`.
1013
+
1014
+ Args:
1015
+ pattern (`str`, *optional*, defaults to "."):
1016
+ The pattern with which to add files to staging.
1017
+ auto_lfs_track (`bool`, *optional*, defaults to `False`):
1018
+ Whether to automatically track large and binary files with
1019
+ git-lfs. Any file over 10MB in size, or in binary format, will
1020
+ be automatically tracked.
1021
+ """
1022
+ if auto_lfs_track:
1023
+ # Track files according to their size (>=10MB)
1024
+ tracked_files = self.auto_track_large_files(pattern)
1025
+
1026
+ # Read the remaining files and track them if they're binary
1027
+ tracked_files.extend(self.auto_track_binary_files(pattern))
1028
+
1029
+ if tracked_files:
1030
+ logger.warning(
1031
+ f"Adding files tracked by Git LFS: {tracked_files}. This may take a"
1032
+ " bit of time if the files are large."
1033
+ )
1034
+
1035
+ try:
1036
+ result = run_subprocess("git add -v".split() + [pattern], self.local_dir)
1037
+ logger.info(f"Adding to index:\n{result.stdout}\n")
1038
+ except subprocess.CalledProcessError as exc:
1039
+ raise EnvironmentError(exc.stderr)
1040
+
1041
+ def git_commit(self, commit_message: str = "commit files to HF hub"):
1042
+ """
1043
+ git commit
1044
+
1045
+ Args:
1046
+ commit_message (`str`, *optional*, defaults to "commit files to HF hub"):
1047
+ The message attributed to the commit.
1048
+ """
1049
+ try:
1050
+ result = run_subprocess("git commit -v -m".split() + [commit_message], self.local_dir)
1051
+ logger.info(f"Committed:\n{result.stdout}\n")
1052
+ except subprocess.CalledProcessError as exc:
1053
+ if len(exc.stderr) > 0:
1054
+ raise EnvironmentError(exc.stderr)
1055
+ else:
1056
+ raise EnvironmentError(exc.stdout)
1057
+
1058
+ def git_push(
1059
+ self,
1060
+ upstream: Optional[str] = None,
1061
+ blocking: bool = True,
1062
+ auto_lfs_prune: bool = False,
1063
+ ) -> Union[str, Tuple[str, CommandInProgress]]:
1064
+ """
1065
+ git push
1066
+
1067
+ If used with `blocking=True` (the default), will return the url to the
1068
+ commit on the remote repo. If used with `blocking=False`, will return
1069
+ a tuple containing the url to the commit and the command object to
1070
+ follow for information about the process.
1071
+
1072
+ Args:
1073
+ upstream (`str`, *optional*):
1074
+ Upstream to which this should push. If not specified, will push
1075
+ to the last-defined upstream or to the default one (`origin
1076
+ main`).
1077
+ blocking (`bool`, *optional*, defaults to `True`):
1078
+ Whether the function should return only when the push has
1079
+ finished. Setting this to `False` will return a
1080
+ `CommandInProgress` object which has an `is_done` property. This
1081
+ property will be set to `True` when the push is finished.
1082
+ auto_lfs_prune (`bool`, *optional*, defaults to `False`):
1083
+ Whether to automatically prune files once they have been pushed
1084
+ to the remote.
1085
+ """
1086
+ command = "git push"
1087
+
1088
+ if upstream:
1089
+ command += f" --set-upstream {upstream}"
1090
+
1091
+ number_of_commits = commits_to_push(self.local_dir, upstream)
1092
+
1093
+ if number_of_commits > 1:
1094
+ logger.warning(f"Several commits ({number_of_commits}) will be pushed upstream.")
1095
+ if blocking:
1096
+ logger.warning("The progress bars may be unreliable.")
1097
+
1098
+ try:
1099
+ with _lfs_log_progress():
1100
+ process = subprocess.Popen(
1101
+ command.split(),
1102
+ stderr=subprocess.PIPE,
1103
+ stdout=subprocess.PIPE,
1104
+ encoding="utf-8",
1105
+ cwd=self.local_dir,
1106
+ )
1107
+
1108
+ if blocking:
1109
+ stdout, stderr = process.communicate()
1110
+ return_code = process.poll()
1111
+ process.kill()
1112
+
1113
+ if len(stderr):
1114
+ logger.warning(stderr)
1115
+
1116
+ if return_code:
1117
+ raise subprocess.CalledProcessError(return_code, process.args, output=stdout, stderr=stderr)
1118
+
1119
+ except subprocess.CalledProcessError as exc:
1120
+ raise EnvironmentError(exc.stderr)
1121
+
1122
+ if not blocking:
1123
+
1124
+ def status_method():
1125
+ status = process.poll()
1126
+ if status is None:
1127
+ return -1
1128
+ else:
1129
+ return status
1130
+
1131
+ command_in_progress = CommandInProgress(
1132
+ "push",
1133
+ is_done_method=lambda: process.poll() is not None,
1134
+ status_method=status_method,
1135
+ process=process,
1136
+ post_method=self.lfs_prune if auto_lfs_prune else None,
1137
+ )
1138
+
1139
+ self.command_queue.append(command_in_progress)
1140
+
1141
+ return self.git_head_commit_url(), command_in_progress
1142
+
1143
+ if auto_lfs_prune:
1144
+ self.lfs_prune()
1145
+
1146
+ return self.git_head_commit_url()
1147
+
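A sketch of a non-blocking push, which returns the commit URL together with the `CommandInProgress` handle defined at the top of this module (continuing from the hypothetical `repo` above):

```python
import time

url, push_command = repo.git_push(blocking=False)

while not push_command.is_done:  # polls the underlying process
    time.sleep(1)

if push_command.failed:
    print(push_command.stderr)
else:
    print(f"Pushed: {url}")
```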
1148
+ def git_checkout(self, revision: str, create_branch_ok: bool = False):
1149
+ """
1150
+ git checkout a given revision
1151
+
1152
+ Setting `create_branch_ok` to `True` will create a branch at the
1153
+ given revision if that revision doesn't exist.
1154
+
1155
+ Args:
1156
+ revision (`str`):
1157
+ The revision to checkout.
1158
+ create_branch_ok (`bool`, *optional*, defaults to `False`):
1159
+ Whether to create a branch named `revision` from the current
1160
+ checked-out reference when `revision` is not an already-existing
1161
+ revision.
1162
+ """
1163
+ try:
1164
+ result = run_subprocess(f"git checkout {revision}", self.local_dir)
1165
+ logger.warning(f"Checked out {revision} from {self.current_branch}.")
1166
+ logger.warning(result.stdout)
1167
+ except subprocess.CalledProcessError as exc:
1168
+ if not create_branch_ok:
1169
+ raise EnvironmentError(exc.stderr)
1170
+ else:
1171
+ try:
1172
+ result = run_subprocess(f"git checkout -b {revision}", self.local_dir)
1173
+ logger.warning(
1174
+ f"Revision `{revision}` does not exist. Created and checked out branch `{revision}`."
1175
+ )
1176
+ logger.warning(result.stdout)
1177
+ except subprocess.CalledProcessError as exc:
1178
+ raise EnvironmentError(exc.stderr)
1179
+
1180
+ def tag_exists(self, tag_name: str, remote: Optional[str] = None) -> bool:
1181
+ """
1182
+ Check if a tag exists or not.
1183
+
1184
+ Args:
1185
+ tag_name (`str`):
1186
+ The name of the tag to check.
1187
+ remote (`str`, *optional*):
1188
+ Whether to check if the tag exists on a remote. This parameter
1189
+ should be the identifier of the remote.
1190
+
1191
+ Returns:
1192
+ `bool`: Whether the tag exists.
1193
+ """
1194
+ if remote:
1195
+ try:
1196
+ result = run_subprocess(f"git ls-remote origin refs/tags/{tag_name}", self.local_dir).stdout.strip()
1197
+ except subprocess.CalledProcessError as exc:
1198
+ raise EnvironmentError(exc.stderr)
1199
+
1200
+ return len(result) != 0
1201
+ else:
1202
+ try:
1203
+ git_tags = run_subprocess("git tag", self.local_dir).stdout.strip()
1204
+ except subprocess.CalledProcessError as exc:
1205
+ raise EnvironmentError(exc.stderr)
1206
+
1207
+ git_tags = git_tags.split("\n")
1208
+ return tag_name in git_tags
1209
+
1210
+ def delete_tag(self, tag_name: str, remote: Optional[str] = None) -> bool:
1211
+ """
1212
+ Delete a tag, both local and remote, if it exists
1213
+
1214
+ Args:
1215
+ tag_name (`str`):
1216
+ The tag name to delete.
1217
+ remote (`str`, *optional*):
1218
+ The remote on which to delete the tag.
1219
+
1220
+ Returns:
1221
+ `bool`: `True` once the deletion commands have run. If `remote` is
1222
+ not passed, the tag is only deleted locally.
1223
+ """
1224
+ delete_locally = True
1225
+ delete_remotely = True
1226
+
1227
+ if not self.tag_exists(tag_name):
1228
+ delete_locally = False
1229
+
1230
+ if not self.tag_exists(tag_name, remote=remote):
1231
+ delete_remotely = False
1232
+
1233
+ if delete_locally:
1234
+ try:
1235
+ run_subprocess(["git", "tag", "-d", tag_name], self.local_dir).stdout.strip()
1236
+ except subprocess.CalledProcessError as exc:
1237
+ raise EnvironmentError(exc.stderr)
1238
+
1239
+ if remote and delete_remotely:
1240
+ try:
1241
+ run_subprocess(f"git push {remote} --delete {tag_name}", self.local_dir).stdout.strip()
1242
+ except subprocess.CalledProcessError as exc:
1243
+ raise EnvironmentError(exc.stderr)
1244
+
1245
+ return True
1246
+
1247
+ def add_tag(self, tag_name: str, message: Optional[str] = None, remote: Optional[str] = None):
1248
+ """
1249
+ Add a tag at the current head and push it
1250
+
1251
+ If remote is None, will just be updated locally
1252
+
1253
+ If no message is provided, the tag will be lightweight. If a message is
1254
+ provided, the tag will be annotated.
1255
+
1256
+ Args:
1257
+ tag_name (`str`):
1258
+ The name of the tag to be added.
1259
+ message (`str`, *optional*):
1260
+ The message that accompanies the tag. The tag will turn into an
1261
+ annotated tag if a message is passed.
1262
+ remote (`str`, *optional*):
1263
+ The remote on which to add the tag.
1264
+ """
1265
+ if message:
1266
+ tag_args = ["git", "tag", "-a", tag_name, "-m", message]
1267
+ else:
1268
+ tag_args = ["git", "tag", tag_name]
1269
+
1270
+ try:
1271
+ run_subprocess(tag_args, self.local_dir).stdout.strip()
1272
+ except subprocess.CalledProcessError as exc:
1273
+ raise EnvironmentError(exc.stderr)
1274
+
1275
+ if remote:
1276
+ try:
1277
+ run_subprocess(f"git push {remote} {tag_name}", self.local_dir).stdout.strip()
1278
+ except subprocess.CalledProcessError as exc:
1279
+ raise EnvironmentError(exc.stderr)
1280
+
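A sketch of the tag helpers with the hypothetical `repo`: a lightweight local tag, then an annotated tag pushed to `origin`:

```python
repo.add_tag("v0.1")  # lightweight tag, local only
repo.add_tag("v1.0", message="First stable release", remote="origin")

if repo.tag_exists("v1.0", remote="origin"):
    repo.delete_tag("v0.1")  # no remote passed: deleted locally only
```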
1281
+ def is_repo_clean(self) -> bool:
1282
+ """
1283
+ Return whether the git status is clean.
1284
+
1285
+ Returns:
1286
+ `bool`: `True` if the git status is clean, `False` otherwise.
1287
+ """
1288
+ try:
1289
+ git_status = run_subprocess("git status --porcelain", self.local_dir).stdout.strip()
1290
+ except subprocess.CalledProcessError as exc:
1291
+ raise EnvironmentError(exc.stderr)
1292
+
1293
+ return len(git_status) == 0
1294
+
1295
+ def push_to_hub(
1296
+ self,
1297
+ commit_message: str = "commit files to HF hub",
1298
+ blocking: bool = True,
1299
+ clean_ok: bool = True,
1300
+ auto_lfs_prune: bool = False,
1301
+ ) -> Union[None, str, Tuple[str, CommandInProgress]]:
1302
+ """
1303
+ Helper to add, commit, and push files to remote repository on the
1304
+ HuggingFace Hub. Will automatically track large files (>10MB).
1305
+
1306
+ Args:
1307
+ commit_message (`str`):
1308
+ Message to use for the commit.
1309
+ blocking (`bool`, *optional*, defaults to `True`):
1310
+ Whether the function should return only when the `git push` has
1311
+ finished.
1312
+ clean_ok (`bool`, *optional*, defaults to `True`):
1313
+ If `True`, this function will return `None` if the repo is
1314
+ untouched (nothing to commit). If `False`, the call fails because
1315
+ the underlying git command fails on a clean repository.
1316
+ auto_lfs_prune (`bool`, *optional*, defaults to `False`):
1317
+ Whether to automatically prune files once they have been pushed
1318
+ to the remote.
1319
+ """
1320
+ if clean_ok and self.is_repo_clean():
1321
+ logger.info("Repo currently clean. Ignoring push_to_hub")
1322
+ return None
1323
+ self.git_add(auto_lfs_track=True)
1324
+ self.git_commit(commit_message)
1325
+ return self.git_push(
1326
+ upstream=f"origin {self.current_branch}",
1327
+ blocking=blocking,
1328
+ auto_lfs_prune=auto_lfs_prune,
1329
+ )
1330
+
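`push_to_hub` chains `git_add`, `git_commit` and `git_push`; a minimal sketch with the hypothetical `repo`:

```python
# Stages everything, auto-tracks large/binary files, commits, and pushes.
commit_url = repo.push_to_hub(commit_message="Add fine-tuned weights")
if commit_url is None:
    print("Nothing to push: the repository was already clean.")
```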
1331
+ @contextmanager
1332
+ def commit(
1333
+ self,
1334
+ commit_message: str,
1335
+ branch: Optional[str] = None,
1336
+ track_large_files: bool = True,
1337
+ blocking: bool = True,
1338
+ auto_lfs_prune: bool = False,
1339
+ ):
1340
+ """
1341
+ Context manager utility to handle committing to a repository. This
1342
+ automatically tracks large files (>10MB) with git-lfs. Set the
1343
+ `track_large_files` argument to `False` if you wish to ignore that
1344
+ behavior.
1345
+
1346
+ Args:
1347
+ commit_message (`str`):
1348
+ Message to use for the commit.
1349
+ branch (`str`, *optional*):
1350
+ The branch on which the commit will appear. This branch will be
1351
+ checked-out before any operation.
1352
+ track_large_files (`bool`, *optional*, defaults to `True`):
1353
+ Whether to automatically track large files or not. Will do so by
1354
+ default.
1355
+ blocking (`bool`, *optional*, defaults to `True`):
1356
+ Whether the function should return only when the `git push` has
1357
+ finished.
1358
+ auto_lfs_prune (`bool`, *optional*, defaults to `False`):
1359
+ Whether to automatically prune files once they have been pushed
1360
+ to the remote.
1361
+
1362
+ Examples:
1363
+
1364
+ ```python
1365
+ >>> with Repository(
1366
+ ... "text-files",
1367
+ ... clone_from="<user>/text-files",
1368
+ ... token=True,
1369
+ >>> ).commit("My first file :)"):
1370
+ ... with open("file.txt", "w+") as f:
1371
+ ... f.write(json.dumps({"hey": 8}))
1372
+
1373
+ >>> import torch
1374
+
1375
+ >>> model = torch.nn.Transformer()
1376
+ >>> with Repository(
1377
+ ... "torch-model",
1378
+ ... clone_from="<user>/torch-model",
1379
+ ... token=True,
1380
+ >>> ).commit("My cool model :)"):
1381
+ ... torch.save(model.state_dict(), "model.pt")
1382
+ ```
1383
+
1384
+ """
1385
+
1386
+ files_to_stage = files_to_be_staged(".", folder=self.local_dir)
1387
+
1388
+ if len(files_to_stage):
1389
+ files_in_msg = str(files_to_stage[:5])[:-1] + ", ...]" if len(files_to_stage) > 5 else str(files_to_stage)
1390
+ logger.error(
1391
+ "There exists some updated files in the local repository that are not"
1392
+ f" committed: {files_in_msg}. This may lead to errors if checking out"
1393
+ " a branch. These files and their modifications will be added to the"
1394
+ " current commit."
1395
+ )
1396
+
1397
+ if branch is not None:
1398
+ self.git_checkout(branch, create_branch_ok=True)
1399
+
1400
+ if is_tracked_upstream(self.local_dir):
1401
+ logger.warning("Pulling changes ...")
1402
+ self.git_pull(rebase=True)
1403
+ else:
1404
+ logger.warning(f"The current branch has no upstream branch. Will push to 'origin {self.current_branch}'")
1405
+
1406
+ current_working_directory = os.getcwd()
1407
+ os.chdir(os.path.join(current_working_directory, self.local_dir))
1408
+
1409
+ try:
1410
+ yield self
1411
+ finally:
1412
+ self.git_add(auto_lfs_track=track_large_files)
1413
+
1414
+ try:
1415
+ self.git_commit(commit_message)
1416
+ except OSError as e:
1417
+ # If no changes are detected, there is nothing to commit.
1418
+ if "nothing to commit" not in str(e):
1419
+ raise e
1420
+
1421
+ try:
1422
+ self.git_push(
1423
+ upstream=f"origin {self.current_branch}",
1424
+ blocking=blocking,
1425
+ auto_lfs_prune=auto_lfs_prune,
1426
+ )
1427
+ except OSError as e:
1428
+ # A "could not read Username" error means git could not authenticate.
1429
+ if "could not read Username" in str(e):
1430
+ raise OSError("Couldn't authenticate user for push. Did you set `token` to `True`?") from e
1431
+ else:
1432
+ raise e
1433
+
1434
+ os.chdir(current_working_directory)
1435
+
1436
+ def repocard_metadata_load(self) -> Optional[Dict]:
1437
+ filepath = os.path.join(self.local_dir, constants.REPOCARD_NAME)
1438
+ if os.path.isfile(filepath):
1439
+ return metadata_load(filepath)
1440
+ return None
1441
+
1442
+ def repocard_metadata_save(self, data: Dict) -> None:
1443
+ return metadata_save(os.path.join(self.local_dir, constants.REPOCARD_NAME), data)
1444
+
1445
+ @property
1446
+ def commands_failed(self):
1447
+ """
1448
+ Returns the asynchronous commands that failed.
1449
+ """
1450
+ return [c for c in self.command_queue if c.status > 0]
1451
+
1452
+ @property
1453
+ def commands_in_progress(self):
1454
+ """
1455
+ Returns the asynchronous commands that are currently in progress.
1456
+ """
1457
+ return [c for c in self.command_queue if not c.is_done]
1458
+
1459
+ def wait_for_commands(self):
1460
+ """
1461
+ Blocking method: blocks all subsequent execution until all commands have
1462
+ been processed.
1463
+ """
1464
+ index = 0
1465
+ for command_failed in self.commands_failed:
1466
+ logger.error(f"The {command_failed.title} command with PID {command_failed._process.pid} failed.")
1467
+ logger.error(command_failed.stderr)
1468
+
1469
+ while self.commands_in_progress:
1470
+ if index % 10 == 0:
1471
+ logger.warning(
1472
+ f"Waiting for the following commands to finish before shutting down: {self.commands_in_progress}."
1473
+ )
1474
+
1475
+ index += 1
1476
+
1477
+ time.sleep(1)
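Finally, a sketch of inspecting the queue populated by non-blocking commands; `wait_for_commands` is also registered via `atexit` in `__init__`, so pending pushes complete before the interpreter exits:

```python
repo.git_push(blocking=False)

print("In progress:", repo.commands_in_progress)
repo.wait_for_commands()
print("Failed:", repo.commands_failed)
```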
.venv/lib/python3.13/site-packages/inquirerpy-0.3.4.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
1
+ uv
.venv/lib/python3.13/site-packages/inquirerpy-0.3.4.dist-info/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2020 Kevin Zhuang
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
.venv/lib/python3.13/site-packages/inquirerpy-0.3.4.dist-info/METADATA ADDED
@@ -0,0 +1,191 @@
1
+ Metadata-Version: 2.1
2
+ Name: inquirerpy
3
+ Version: 0.3.4
4
+ Summary: Python port of Inquirer.js (A collection of common interactive command-line user interfaces)
5
+ Home-page: https://github.com/kazhala/InquirerPy
6
+ License: MIT
7
+ Keywords: cli,prompt-toolkit,commandline,inquirer,development
8
+ Author: Kevin Zhuang
9
+ Author-email: kevin7441@gmail.com
10
+ Maintainer: Kevin Zhuang
11
+ Maintainer-email: kevin7441@gmail.com
12
+ Requires-Python: >=3.7,<4.0
13
+ Classifier: Development Status :: 2 - Pre-Alpha
14
+ Classifier: Environment :: Console
15
+ Classifier: Intended Audience :: Developers
16
+ Classifier: License :: OSI Approved :: MIT License
17
+ Classifier: Operating System :: Microsoft
18
+ Classifier: Operating System :: Unix
19
+ Classifier: Programming Language :: Python :: 3
20
+ Classifier: Programming Language :: Python :: 3.10
21
+ Classifier: Programming Language :: Python :: 3.7
22
+ Classifier: Programming Language :: Python :: 3.8
23
+ Classifier: Programming Language :: Python :: 3.9
24
+ Classifier: Topic :: Software Development
25
+ Classifier: Topic :: Software Development :: Libraries
26
+ Classifier: Topic :: Software Development :: Libraries :: Application Frameworks
27
+ Classifier: Topic :: Software Development :: User Interfaces
28
+ Provides-Extra: docs
29
+ Requires-Dist: Sphinx (>=4.1.2,<5.0.0); extra == "docs"
30
+ Requires-Dist: furo (>=2021.8.17-beta.43,<2022.0.0); extra == "docs"
31
+ Requires-Dist: myst-parser (>=0.15.1,<0.16.0); extra == "docs"
32
+ Requires-Dist: pfzy (>=0.3.1,<0.4.0)
33
+ Requires-Dist: prompt-toolkit (>=3.0.1,<4.0.0)
34
+ Requires-Dist: sphinx-autobuild (>=2021.3.14,<2022.0.0); extra == "docs"
35
+ Requires-Dist: sphinx-copybutton (>=0.4.0,<0.5.0); extra == "docs"
36
+ Project-URL: Documentation, https://inquirerpy.readthedocs.io
37
+ Project-URL: Repository, https://github.com/kazhala/InquirerPy
38
+ Description-Content-Type: text/markdown
39
+
40
+ # InquirerPy
41
+
42
+ [![Test](https://github.com/kazhala/InquirerPy/workflows/Test/badge.svg)](https://github.com/kazhala/InquirerPy/actions?query=workflow%3ATest)
43
+ [![Lint](https://github.com/kazhala/InquirerPy/workflows/Lint/badge.svg)](https://github.com/kazhala/InquirerPy/actions?query=workflow%3ALint)
44
+ [![Build](https://codebuild.ap-southeast-2.amazonaws.com/badges?uuid=eyJlbmNyeXB0ZWREYXRhIjoiUUYyRUIxOXBWZ0hKcUhrbXplQklMemRsTVBxbUk3bFlTdldnRGpxeEpQSXJidEtmVEVzbVNCTE1UR3VoRSt2N0NQV0VaUXlCUzNackFBNzRVUFBBS1FnPSIsIml2UGFyYW1ldGVyU3BlYyI6IloxREtFeWY4WkhxV0NFWU0iLCJtYXRlcmlhbFNldFNlcmlhbCI6MX0%3D&branch=master)](https://ap-southeast-2.console.aws.amazon.com/codesuite/codebuild/378756445655/projects/InquirerPy/history?region=ap-southeast-2&builds-meta=eyJmIjp7InRleHQiOiIifSwicyI6e30sIm4iOjIwLCJpIjowfQ)
45
+ [![Coverage](https://img.shields.io/coveralls/github/kazhala/InquirerPy?logo=coveralls)](https://coveralls.io/github/kazhala/InquirerPy?branch=master)
46
+ [![Version](https://img.shields.io/pypi/pyversions/InquirerPy)](https://pypi.org/project/InquirerPy/)
47
+ [![PyPi](https://img.shields.io/pypi/v/InquirerPy)](https://pypi.org/project/InquirerPy/)
48
+
49
+ Documentation: [inquirerpy.readthedocs.io](https://inquirerpy.readthedocs.io/)
50
+
51
+ <!-- start intro -->
52
+
53
+ ## Introduction
54
+
55
+ `InquirerPy` is a Python port of the famous [Inquirer.js](https://github.com/SBoudrias/Inquirer.js/) (A collection of common interactive command line user interfaces).
56
+ This project is a re-implementation of the [PyInquirer](https://github.com/CITGuru/PyInquirer) project, with bug fixes of known issues, new prompts, backward compatible APIs
57
+ as well as more customisation options.
58
+
59
+ <!-- end intro -->
60
+
61
+ ![Demo](https://github.com/kazhala/gif/blob/master/InquirerPy-demo.gif)
62
+
63
+ ## Motivation
64
+
65
+ [PyInquirer](https://github.com/CITGuru/PyInquirer) is a great Python port of [Inquirer.js](https://github.com/SBoudrias/Inquirer.js/), however, the project is slowly reaching
66
+ to an unmaintained state with various issues left behind and no intention to implement more feature requests. I was heavily relying on this library for other projects but
67
+ could not proceed due to the limitations.
68
+
69
+ Some noticeable ones that bother me the most:
70
+
71
+ - hard limit on `prompt_toolkit` version 1.0.3
72
+ - various color issues
73
+ - various cursor issues
74
+ - No options for VI/Emacs navigation key bindings
75
+ - Pagination option doesn't work
76
+
77
+ This project uses python3.7+ type hinting with focus on resolving above issues while providing greater customisation options.
78
+
79
+ ## Requirements
80
+
81
+ ### OS
82
+
83
+ Leveraging [prompt_toolkit](https://github.com/prompt-toolkit/python-prompt-toolkit), `InquirerPy` works cross platform for all OS. Although Unix platform may have a better experience than Windows.
84
+
85
+ ### Python
86
+
87
+ ```
88
+ python >= 3.7
89
+ ```
90
+
91
+ ## Getting Started
92
+
93
+ Checkout full documentation **[here](https://inquirerpy.readthedocs.io/)**.
94
+
95
+ ### Install
96
+
97
+ ```sh
98
+ pip3 install InquirerPy
99
+ ```
100
+
101
+ ### Quick Start
102
+
103
+ #### Classic Syntax (PyInquirer)
104
+
105
+ ```python
106
+ from InquirerPy import prompt
107
+
108
+ questions = [
109
+ {"type": "input", "message": "What's your name:", "name": "name"},
110
+ {"type": "confirm", "message": "Confirm?", "name": "confirm"},
111
+ ]
112
+ result = prompt(questions)
113
+ name = result["name"]
114
+ confirm = result["confirm"]
115
+ ```
116
+
117
+ #### Alternate Syntax
118
+
119
+ ```python
120
+ from InquirerPy import inquirer
121
+
122
+ name = inquirer.text(message="What's your name:").execute()
123
+ confirm = inquirer.confirm(message="Confirm?").execute()
124
+ ```
125
+
126
+ <!-- start migration -->
127
+
128
+ ## Migrating from PyInquirer
129
+
130
+ Most APIs from [PyInquirer](https://github.com/CITGuru/PyInquirer) should be compatible with `InquirerPy`. If you have discovered more incompatible APIs, please
131
+ create an issue or directly update README via a pull request.
132
+
133
+ ### EditorPrompt
134
+
135
+ `InquirerPy` does not support [editor](https://github.com/CITGuru/PyInquirer#editor---type-editor) prompt as of now.
136
+
137
+ ### CheckboxPrompt
138
+
139
+ The following table contains the mapping of incompatible parameters.
140
+
141
+ | PyInquirer | InquirerPy |
142
+ | --------------- | --------------- |
143
+ | pointer_sign | pointer |
144
+ | selected_sign | enabled_symbol |
145
+ | unselected_sign | disabled_symbol |
146
+
147
+ ### Style
148
+
149
+ Every style keys from [PyInquirer](https://github.com/CITGuru/PyInquirer) is present in `InquirerPy` except the ones in the following table.
150
+
151
+ | PyInquirer | InquirerPy |
152
+ | ---------- | ---------- |
153
+ | selected | pointer |
154
+
155
+ Although `InquirerPy` support all the keys from [PyInquirer](https://github.com/CITGuru/PyInquirer), the styling works slightly different.
156
+ Please refer to the [Style](https://inquirerpy.readthedocs.io/en/latest/pages/style.html) documentation for detailed information.
157
+
158
+ <!-- end migration -->
159
+
160
+ ## Similar projects
161
+
162
+ ### questionary
163
+
164
+ [questionary](https://github.com/tmbo/questionary) is a fantastic fork which supports `prompt_toolkit` 3.0.0+ with performance improvement and more customisation options.
165
+ It's already a well established and stable library.
166
+
167
+ Comparing with [questionary](https://github.com/tmbo/questionary), `InquirerPy` offers even more customisation options in styles, UI as well as key bindings. `InquirerPy` also provides a new
168
+ and powerful [fuzzy](https://inquirerpy.readthedocs.io/en/latest/pages/prompts/fuzzy.html) prompt.
169
+
170
+ ### python-inquirer
171
+
172
+ [python-inquirer](https://github.com/magmax/python-inquirer) is another great Python port of [Inquirer.js](https://github.com/SBoudrias/Inquirer.js/). Instead of using `prompt_toolkit`, it
173
+ leverages the library `blessed` to implement the UI.
174
+
175
+ Before implementing `InquirerPy`, this library came up as an alternative. It's a more stable library comparing to the original [PyInquirer](https://github.com/CITGuru/PyInquirer), however
176
+ it has a rather limited customisation options and an older UI which did not solve the issues I was facing described in the [Motivation](#Motivation) section.
177
+
178
+ Comparing with [python-inquirer](https://github.com/magmax/python-inquirer), `InquirerPy` offers a slightly better UI,
179
+ more customisation options in key bindings and styles, providing pagination as well as more prompts.
180
+
181
+ ## Credit
182
+
183
+ This project is based on the great work done by the following projects & their authors.
184
+
185
+ - [PyInquirer](https://github.com/CITGuru/PyInquirer)
186
+ - [prompt_toolkit](https://github.com/prompt-toolkit/python-prompt-toolkit)
187
+
188
+ ## License
189
+
190
+ This project is licensed under [MIT](https://github.com/kazhala/InquirerPy/blob/master/LICENSE).
191
+
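To make the CheckboxPrompt mapping above concrete, here is a minimal sketch using the InquirerPy parameter names from the table; the message, choices, and symbol strings are illustrative, not taken from the package:

```python
from InquirerPy import inquirer

# PyInquirer's pointer_sign / selected_sign / unselected_sign are named
# pointer / enabled_symbol / disabled_symbol in InquirerPy (per the table above).
selected = inquirer.checkbox(
    message="Pick toppings:",  # illustrative prompt text
    choices=["cheese", "mushroom", "olives"],
    pointer=">",  # was pointer_sign
    enabled_symbol="[x]",  # was selected_sign
    disabled_symbol="[ ]",  # was unselected_sign
).execute()
print(selected)  # list of checked choices
```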
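Similarly, for the Style table: a sketch of the renamed `selected` → `pointer` style key. `get_style` lives in `InquirerPy/utils.py` per the RECORD below; treating it as accepting a plain dict, and the color value itself, are assumptions:

```python
from InquirerPy import inquirer
from InquirerPy.utils import get_style  # module listed in the RECORD below

# PyInquirer's "selected" style key is called "pointer" in InquirerPy.
style = get_style({"pointer": "#61afef bold"})  # assumed to accept a plain dict
choice = inquirer.select(
    message="Pick one:",  # illustrative
    choices=["a", "b", "c"],
    style=style,
).execute()
```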
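And for the fuzzy prompt the README highlights, a sketch assuming the same `.execute()` pattern as the Quick Start examples (the choices are illustrative):

```python
from InquirerPy import inquirer

# Typing into the prompt fuzzy-filters the candidate list.
package = inquirer.fuzzy(
    message="Select a package:",  # illustrative
    choices=["fsspec", "huggingface_hub", "inquirerpy", "hf_xet"],
).execute()
```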
.venv/lib/python3.13/site-packages/inquirerpy-0.3.4.dist-info/RECORD ADDED
@@ -0,0 +1,36 @@
+ InquirerPy/__init__.py,sha256=qedqHHt5TKcRJSOEm9NlnrnLbwIoJ3klfGEyao7cFDY,92
+ InquirerPy/base/__init__.py,sha256=Xmc1pD9KxTVcEFusd1uDFTUbye457b42Wq3nKY5hfPI,396
+ InquirerPy/base/complex.py,sha256=pZ7EyLwJCoYJiTtVRSOv1mPK8Ceny0Fnc_uv2_EXJcs,10314
+ InquirerPy/base/control.py,sha256=o8v5Kc0O7EQL8FS4wKiiAvsGdWcEOm-ny9nzm2_6zL4,8334
+ InquirerPy/base/list.py,sha256=5YzgFpJWERavnopkHiL-1UnngWB8ZS9QsY4XXfW-C2g,8244
+ InquirerPy/base/simple.py,sha256=PFqJN8GtKNcPTXkmTSprWH2rSdu_IXsxJTSugxaReZ0,13448
+ InquirerPy/containers/__init__.py,sha256=Of9WUUzouFf7HbNXPYXtsgCVZe2iFOBSLYDjP3tV8do,35
+ InquirerPy/containers/instruction.py,sha256=30zg-Bd_WR4DHBLnPsl23tAfHpiPxr0mgvNBo1730Yk,1273
+ InquirerPy/containers/message.py,sha256=K54H5o7xV4CaTH8x_dWvhmrP9c3mFO4xY9KTdoAyD-Q,1476
+ InquirerPy/containers/spinner.py,sha256=7IA2oRDpwpW0kva-bM5LDFd6tsqenhCXXRsd8kutDrw,3954
+ InquirerPy/containers/validation.py,sha256=J4pbQD1n5D-jdy-S0ozdUJM7P6lxc6VUBVd-EP3lRYo,1904
+ InquirerPy/enum.py,sha256=5jj4bG2Tjjr4GjpFOoBpCxpwNJ0hbw-kA1ic0WvKeWI,293
+ InquirerPy/exceptions.py,sha256=gQyHPx0PbKNUG1XUm4hmPqq5kzaXkAQviKYb4v8NNAw,598
+ InquirerPy/inquirer.py,sha256=KFvCyiPUAgp1i4hJSA4xLdkSfMhcYezjRG6CICWbU78,800
+ InquirerPy/prompts/__init__.py,sha256=oJVhPbnR67_x20GvFIav8WpAg-tM-KiUjJFHRddh3f0,567
+ InquirerPy/prompts/checkbox.py,sha256=zKxYwgv4IrpITT0wzUvjXw3QFVU25rAIHStdZUPgmGU,10789
+ InquirerPy/prompts/confirm.py,sha256=NtQW-3j5adKOA2PSvdoinAfXAsC441LGezsAew7Bivo,8522
+ InquirerPy/prompts/expand.py,sha256=Yz_pqTqo9i1M-qm6DxDyoS_XvT88MpUZY332f8OOF80,19208
+ InquirerPy/prompts/filepath.py,sha256=tEetCSGdN9u1AFNFfM0_kjJ0rwxBK0-55A0Od0FML8M,8380
+ InquirerPy/prompts/fuzzy.py,sha256=7G4z0k6c0pSwrvk-Y-yk70G3og51siSMI0qyojzn0ho,27709
+ InquirerPy/prompts/input.py,sha256=Cyu0fGXgdoA6mhUzbh7PstEBTlkVEEcntoasmLA9wP0,11248
+ InquirerPy/prompts/list.py,sha256=bFOn5cCQzfnFAtKZlgzQp7mful54QzvGiNG3ti2-snw,15616
+ InquirerPy/prompts/number.py,sha256=fJ1lmZ8nsBRIU95CV6TjyDTo2lKfWX3Q00udCykpFSU,25154
+ InquirerPy/prompts/rawlist.py,sha256=qb3ZLLA2XCLsJFFLIeX0z7HylZIcPfvfqj4KR4dYWGA,12589
+ InquirerPy/prompts/secret.py,sha256=uZtT_HIDfTfRRb5xuFS6IQ_ub9ZirHs_KYTNDciHDvg,6099
+ InquirerPy/py.typed,sha256=E0Fb-ljbipVe3fXv9yM10FgkDz3ks8ADvBjROBPyLxM,70
+ InquirerPy/resolver.py,sha256=L2PnXpp7HwM4Vp4mvNXjZ_CiX-BFgQ4KgnMTjXDMihI,8396
+ InquirerPy/separator.py,sha256=Om-lhbj9kOGocwhKdQ2Wex6CTJH69CKar72wPZRETQY,693
+ InquirerPy/utils.py,sha256=Lf0nYT1xNJobjQrNR5Z0vDIR4jJeITFLeI0EfEuYAm8,11224
+ InquirerPy/validator.py,sha256=exRQ0dEdfaVScVYuSsPfLNuhwWIVueHHw1kUYAjGr2w,5907
+ inquirerpy-0.3.4.dist-info/INSTALLER,sha256=5hhM4Q4mYTT9z6QB6PGpUAW81PGNFrYrdXMj4oM_6ak,2
+ inquirerpy-0.3.4.dist-info/LICENSE,sha256=f_X1vlC7GWiYmTHt2_p9ESxTHEj_V3y4xAbsCRGCx6I,1069
+ inquirerpy-0.3.4.dist-info/METADATA,sha256=E6RR6q3qbYpH4eVeNPppbd1UV18ywnN7jjuxtJQnis8,8145
+ inquirerpy-0.3.4.dist-info/RECORD,,
+ inquirerpy-0.3.4.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ inquirerpy-0.3.4.dist-info/WHEEL,sha256=y3eDiaFVSNTPbgzfNn0nYn5tEn1cX6WrdetDlQM4xWw,83
.venv/lib/python3.13/site-packages/inquirerpy-0.3.4.dist-info/REQUESTED ADDED
File without changes
.venv/lib/python3.13/site-packages/inquirerpy-0.3.4.dist-info/WHEEL ADDED
@@ -0,0 +1,4 @@
+ Wheel-Version: 1.0
+ Generator: poetry 1.0.7
+ Root-Is-Purelib: true
+ Tag: py3-none-any
2022/.DS_Store ADDED
Binary file (6.15 kB).