ADAPT-Chase committed on
Commit
1097442
·
verified ·
1 Parent(s): 8d1427d

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +23 -3
  2. tool_server/.venv/lib/python3.12/site-packages/fsspec/__pycache__/__init__.cpython-312.pyc +0 -0
  3. tool_server/.venv/lib/python3.12/site-packages/fsspec/__pycache__/_version.cpython-312.pyc +0 -0
  4. tool_server/.venv/lib/python3.12/site-packages/fsspec/__pycache__/archive.cpython-312.pyc +0 -0
  5. tool_server/.venv/lib/python3.12/site-packages/fsspec/__pycache__/asyn.cpython-312.pyc +0 -0
  6. tool_server/.venv/lib/python3.12/site-packages/fsspec/__pycache__/caching.cpython-312.pyc +0 -0
  7. tool_server/.venv/lib/python3.12/site-packages/fsspec/__pycache__/callbacks.cpython-312.pyc +0 -0
  8. tool_server/.venv/lib/python3.12/site-packages/fsspec/__pycache__/compression.cpython-312.pyc +0 -0
  9. tool_server/.venv/lib/python3.12/site-packages/fsspec/__pycache__/config.cpython-312.pyc +0 -0
  10. tool_server/.venv/lib/python3.12/site-packages/fsspec/__pycache__/conftest.cpython-312.pyc +0 -0
  11. tool_server/.venv/lib/python3.12/site-packages/fsspec/__pycache__/core.cpython-312.pyc +0 -0
  12. tool_server/.venv/lib/python3.12/site-packages/fsspec/__pycache__/dircache.cpython-312.pyc +0 -0
  13. tool_server/.venv/lib/python3.12/site-packages/fsspec/__pycache__/exceptions.cpython-312.pyc +0 -0
  14. tool_server/.venv/lib/python3.12/site-packages/fsspec/__pycache__/fuse.cpython-312.pyc +0 -0
  15. tool_server/.venv/lib/python3.12/site-packages/fsspec/__pycache__/generic.cpython-312.pyc +0 -0
  16. tool_server/.venv/lib/python3.12/site-packages/fsspec/__pycache__/gui.cpython-312.pyc +0 -0
  17. tool_server/.venv/lib/python3.12/site-packages/fsspec/__pycache__/json.cpython-312.pyc +0 -0
  18. tool_server/.venv/lib/python3.12/site-packages/fsspec/__pycache__/mapping.cpython-312.pyc +0 -0
  19. tool_server/.venv/lib/python3.12/site-packages/fsspec/__pycache__/parquet.cpython-312.pyc +0 -0
  20. tool_server/.venv/lib/python3.12/site-packages/fsspec/__pycache__/registry.cpython-312.pyc +0 -0
  21. tool_server/.venv/lib/python3.12/site-packages/fsspec/__pycache__/spec.cpython-312.pyc +0 -0
  22. tool_server/.venv/lib/python3.12/site-packages/fsspec/__pycache__/transaction.cpython-312.pyc +0 -0
  23. tool_server/.venv/lib/python3.12/site-packages/fsspec/__pycache__/utils.cpython-312.pyc +0 -0
  24. tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/__init__.py +0 -0
  25. tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/__pycache__/__init__.cpython-312.pyc +0 -0
  26. tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/__pycache__/arrow.cpython-312.pyc +0 -0
  27. tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/__pycache__/asyn_wrapper.cpython-312.pyc +0 -0
  28. tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/__pycache__/cache_mapper.cpython-312.pyc +0 -0
  29. tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/__pycache__/cache_metadata.cpython-312.pyc +0 -0
  30. tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/__pycache__/cached.cpython-312.pyc +0 -0
  31. tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/__pycache__/dask.cpython-312.pyc +0 -0
  32. tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/__pycache__/data.cpython-312.pyc +0 -0
  33. tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/__pycache__/dbfs.cpython-312.pyc +0 -0
  34. tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/__pycache__/dirfs.cpython-312.pyc +0 -0
  35. tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/__pycache__/ftp.cpython-312.pyc +0 -0
  36. tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/arrow.py +304 -0
  37. tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/asyn_wrapper.py +122 -0
  38. tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/cache_mapper.py +75 -0
  39. tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/cache_metadata.py +233 -0
  40. tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/cached.py +998 -0
  41. tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/dask.py +152 -0
  42. tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/data.py +58 -0
  43. tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/dbfs.py +496 -0
  44. tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/dirfs.py +388 -0
  45. tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/ftp.py +387 -0
  46. tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/gist.py +232 -0
  47. tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/git.py +114 -0
  48. tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/github.py +333 -0
  49. tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/http.py +890 -0
  50. tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/http_sync.py +931 -0
.gitattributes CHANGED
@@ -4059,6 +4059,26 @@ tool_server/.venv/lib/python3.12/site-packages/nvidia/cufile/lib/libcufile.so.0
4059
  tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_runtime/lib/libcudart.so.12 filter=lfs diff=lfs merge=lfs -text
4060
  tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_nvrtc/lib/libnvrtc-builtins.so.12.6 filter=lfs diff=lfs merge=lfs -text
4061
  tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_nvrtc/lib/libnvrtc.so.12 filter=lfs diff=lfs merge=lfs -text
4062
- tool_server/.venv/lib/python3.12/site-packages/llvmlite/binding/libllvmlite.so filter=lfs diff=lfs merge=lfs -text
4063
- tool_server/.venv/lib/python3.12/site-packages/llvmlite/tests/__pycache__/test_binding.cpython-312.pyc filter=lfs diff=lfs merge=lfs -text
4064
- tool_server/.venv/lib/python3.12/site-packages/llvmlite/tests/__pycache__/test_ir.cpython-312.pyc filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4059
  tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_runtime/lib/libcudart.so.12 filter=lfs diff=lfs merge=lfs -text
4060
  tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_nvrtc/lib/libnvrtc-builtins.so.12.6 filter=lfs diff=lfs merge=lfs -text
4061
  tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_nvrtc/lib/libnvrtc.so.12 filter=lfs diff=lfs merge=lfs -text
4062
+ tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/lib/libcheckpoint.so filter=lfs diff=lfs merge=lfs -text
4063
+ tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/lib/libcupti.so.12 filter=lfs diff=lfs merge=lfs -text
4064
+ tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/lib/libnvperf_host.so filter=lfs diff=lfs merge=lfs -text
4065
+ tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/lib/libnvperf_target.so filter=lfs diff=lfs merge=lfs -text
4066
+ tool_server/.venv/lib/python3.12/site-packages/nvidia/cuda_cupti/lib/libpcsamplingutil.so filter=lfs diff=lfs merge=lfs -text
4067
+ tool_server/.venv/lib/python3.12/site-packages/nvidia/cublas/lib/libcublas.so.12 filter=lfs diff=lfs merge=lfs -text
4068
+ tool_server/.venv/lib/python3.12/site-packages/nvidia/cublas/lib/libcublasLt.so.12 filter=lfs diff=lfs merge=lfs -text
4069
+ tool_server/.venv/lib/python3.12/site-packages/nvidia/cublas/lib/libnvblas.so.12 filter=lfs diff=lfs merge=lfs -text
4070
+ tool_server/.venv/lib/python3.12/site-packages/nvidia/cusparse/lib/libcusparse.so.12 filter=lfs diff=lfs merge=lfs -text
4071
+ tool_server/.venv/lib/python3.12/site-packages/nvidia/cufft/lib/libcufft.so.11 filter=lfs diff=lfs merge=lfs -text
4072
+ tool_server/.venv/lib/python3.12/site-packages/nvidia/cufft/lib/libcufftw.so.11 filter=lfs diff=lfs merge=lfs -text
4073
+ tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/lib/libcudnn.so.9 filter=lfs diff=lfs merge=lfs -text
4074
+ tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/lib/libcudnn_adv.so.9 filter=lfs diff=lfs merge=lfs -text
4075
+ tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/lib/libcudnn_cnn.so.9 filter=lfs diff=lfs merge=lfs -text
4076
+ tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/lib/libcudnn_engines_precompiled.so.9 filter=lfs diff=lfs merge=lfs -text
4077
+ tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/lib/libcudnn_engines_runtime_compiled.so.9 filter=lfs diff=lfs merge=lfs -text
4078
+ tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/lib/libcudnn_graph.so.9 filter=lfs diff=lfs merge=lfs -text
4079
+ tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/lib/libcudnn_heuristic.so.9 filter=lfs diff=lfs merge=lfs -text
4080
+ tool_server/.venv/lib/python3.12/site-packages/nvidia/cudnn/lib/libcudnn_ops.so.9 filter=lfs diff=lfs merge=lfs -text
4081
+ tool_server/.venv/lib/python3.12/site-packages/nvidia/cusolver/lib/libcusolver.so.11 filter=lfs diff=lfs merge=lfs -text
4082
+ tool_server/.venv/lib/python3.12/site-packages/nvidia/cusolver/lib/libcusolverMg.so.11 filter=lfs diff=lfs merge=lfs -text
4083
+ tool_server/.venv/lib/python3.12/site-packages/httptools/parser/parser.cpython-312-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
4084
+ tool_server/.venv/lib/python3.12/site-packages/httptools/parser/url_parser.cpython-312-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
tool_server/.venv/lib/python3.12/site-packages/fsspec/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (1.9 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/fsspec/__pycache__/_version.cpython-312.pyc ADDED
Binary file (787 Bytes). View file
 
tool_server/.venv/lib/python3.12/site-packages/fsspec/__pycache__/archive.cpython-312.pyc ADDED
Binary file (4.11 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/fsspec/__pycache__/asyn.cpython-312.pyc ADDED
Binary file (45.3 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/fsspec/__pycache__/caching.cpython-312.pyc ADDED
Binary file (40.2 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/fsspec/__pycache__/callbacks.cpython-312.pyc ADDED
Binary file (12.9 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/fsspec/__pycache__/compression.cpython-312.pyc ADDED
Binary file (7.41 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/fsspec/__pycache__/config.cpython-312.pyc ADDED
Binary file (6.03 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/fsspec/__pycache__/conftest.cpython-312.pyc ADDED
Binary file (3.13 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/fsspec/__pycache__/core.cpython-312.pyc ADDED
Binary file (28.3 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/fsspec/__pycache__/dircache.cpython-312.pyc ADDED
Binary file (4.52 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/fsspec/__pycache__/exceptions.cpython-312.pyc ADDED
Binary file (832 Bytes). View file
 
tool_server/.venv/lib/python3.12/site-packages/fsspec/__pycache__/fuse.cpython-312.pyc ADDED
Binary file (15.7 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/fsspec/__pycache__/generic.cpython-312.pyc ADDED
Binary file (18.2 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/fsspec/__pycache__/gui.cpython-312.pyc ADDED
Binary file (21.6 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/fsspec/__pycache__/json.cpython-312.pyc ADDED
Binary file (6.16 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/fsspec/__pycache__/mapping.cpython-312.pyc ADDED
Binary file (12.2 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/fsspec/__pycache__/parquet.cpython-312.pyc ADDED
Binary file (15.7 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/fsspec/__pycache__/registry.cpython-312.pyc ADDED
Binary file (11.5 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/fsspec/__pycache__/spec.cpython-312.pyc ADDED
Binary file (91 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/fsspec/__pycache__/transaction.cpython-312.pyc ADDED
Binary file (4.59 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/fsspec/__pycache__/utils.cpython-312.pyc ADDED
Binary file (28.3 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/__init__.py ADDED
File without changes
tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (192 Bytes). View file
 
tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/__pycache__/arrow.cpython-312.pyc ADDED
Binary file (13.5 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/__pycache__/asyn_wrapper.cpython-312.pyc ADDED
Binary file (5.56 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/__pycache__/cache_mapper.cpython-312.pyc ADDED
Binary file (4 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/__pycache__/cache_metadata.cpython-312.pyc ADDED
Binary file (11.2 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/__pycache__/cached.cpython-312.pyc ADDED
Binary file (46.8 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/__pycache__/dask.cpython-312.pyc ADDED
Binary file (7.11 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/__pycache__/data.cpython-312.pyc ADDED
Binary file (3.04 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/__pycache__/dbfs.cpython-312.pyc ADDED
Binary file (19.5 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/__pycache__/dirfs.cpython-312.pyc ADDED
Binary file (24.8 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/__pycache__/ftp.cpython-312.pyc ADDED
Binary file (17.3 kB). View file
 
tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/arrow.py ADDED
@@ -0,0 +1,304 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import errno
2
+ import io
3
+ import os
4
+ import secrets
5
+ import shutil
6
+ from contextlib import suppress
7
+ from functools import cached_property, wraps
8
+ from urllib.parse import parse_qs
9
+
10
+ from fsspec.spec import AbstractFileSystem
11
+ from fsspec.utils import (
12
+ get_package_version_without_import,
13
+ infer_storage_options,
14
+ mirror_from,
15
+ tokenize,
16
+ )
17
+
18
+
19
def wrap_exceptions(func):
    """Decorator translating pyarrow's "does not exist" ``OSError``s into
    ``FileNotFoundError``; every other error propagates unchanged.
    """

    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except OSError as exc:
            if not exc.args:
                raise
            # pyarrow reports missing paths as a plain OSError whose first
            # argument is a message containing "does not exist".
            message = exc.args[0]
            if isinstance(message, str) and "does not exist" in message:
                raise FileNotFoundError(errno.ENOENT, message) from exc
            raise

    return wrapper
35
+
36
+
37
+ PYARROW_VERSION = None  # filled in lazily by ArrowFSWrapper.__init__
38
+
39
+
40
class ArrowFSWrapper(AbstractFileSystem):
    """FSSpec-compatible wrapper of pyarrow.fs.FileSystem.

    Parameters
    ----------
    fs : pyarrow.fs.FileSystem
        The pyarrow filesystem instance to wrap.
    """

    root_marker = "/"

    def __init__(self, fs, **kwargs):
        global PYARROW_VERSION
        # Version is read without importing pyarrow itself; only needed
        # to decide whether _open may pass compression=None.
        PYARROW_VERSION = get_package_version_without_import("pyarrow")
        self.fs = fs
        super().__init__(**kwargs)

    @property
    def protocol(self):
        # Mirror the wrapped filesystem's type name (e.g. "hdfs").
        return self.fs.type_name

    @cached_property
    def fsid(self):
        return "hdfs_" + tokenize(self.fs.host, self.fs.port)

    @classmethod
    def _strip_protocol(cls, path):
        ops = infer_storage_options(path)
        path = ops["path"]
        if path.startswith("//"):
            # special case for "hdfs://path" (without the triple slash)
            path = path[1:]
        return path

    def ls(self, path, detail=False, **kwargs):
        """List entries under *path*: info dicts if detail=True, else names."""
        path = self._strip_protocol(path)
        from pyarrow.fs import FileSelector

        entries = [
            self._make_entry(entry)
            for entry in self.fs.get_file_info(FileSelector(path))
        ]
        if detail:
            return entries
        else:
            return [entry["name"] for entry in entries]

    def info(self, path, **kwargs):
        path = self._strip_protocol(path)
        [info] = self.fs.get_file_info([path])
        return self._make_entry(info)

    def exists(self, path):
        path = self._strip_protocol(path)
        try:
            self.info(path)
        except FileNotFoundError:
            return False
        else:
            return True

    def _make_entry(self, info):
        """Convert a pyarrow FileInfo into an fsspec-style info dict.

        Raises
        ------
        FileNotFoundError
            If the entry's type is ``FileType.NotFound``.
        """
        from pyarrow.fs import FileType

        if info.type is FileType.Directory:
            kind = "directory"
        elif info.type is FileType.File:
            kind = "file"
        elif info.type is FileType.NotFound:
            raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), info.path)
        else:
            kind = "other"

        return {
            "name": info.path,
            "size": info.size,
            "type": kind,
            "mtime": info.mtime,
        }

    @wrap_exceptions
    def cp_file(self, path1, path2, **kwargs):
        """Copy *path1* to *path2* via a randomly-named temporary file, so the
        destination is never observed in a half-written state."""
        path1 = self._strip_protocol(path1).rstrip("/")
        path2 = self._strip_protocol(path2).rstrip("/")

        with self._open(path1, "rb") as lstream:
            tmp_fname = f"{path2}.tmp.{secrets.token_hex(6)}"
            try:
                with self.open(tmp_fname, "wb") as rstream:
                    shutil.copyfileobj(lstream, rstream)
                self.fs.move(tmp_fname, path2)
            except BaseException:
                # best-effort cleanup of the partially-written temp file
                with suppress(FileNotFoundError):
                    self.fs.delete_file(tmp_fname)
                raise

    @wrap_exceptions
    def mv(self, path1, path2, **kwargs):
        path1 = self._strip_protocol(path1).rstrip("/")
        path2 = self._strip_protocol(path2).rstrip("/")
        self.fs.move(path1, path2)

    @wrap_exceptions
    def rm_file(self, path):
        path = self._strip_protocol(path)
        self.fs.delete_file(path)

    @wrap_exceptions
    def rm(self, path, recursive=False, maxdepth=None):
        """Delete a file, or a directory tree when recursive=True."""
        path = self._strip_protocol(path).rstrip("/")
        if self.isdir(path):
            if recursive:
                self.fs.delete_dir(path)
            else:
                # BUGFIX: message previously read "without recursive=False",
                # contradicting the actual requirement.
                raise ValueError("Can't delete directories without recursive=True")
        else:
            self.fs.delete_file(path)

    @wrap_exceptions
    def _open(self, path, mode="rb", block_size=None, seekable=True, **kwargs):
        """Open *path*, returning an ArrowFile wrapping the pyarrow stream.

        seekable=False selects the cheaper forward-only input stream for
        reads; writes/appends are always streams.
        """
        if mode == "rb":
            if seekable:
                method = self.fs.open_input_file
            else:
                method = self.fs.open_input_stream
        elif mode == "wb":
            method = self.fs.open_output_stream
        elif mode == "ab":
            method = self.fs.open_append_stream
        else:
            raise ValueError(f"unsupported mode for Arrow filesystem: {mode!r}")

        _kwargs = {}
        if mode != "rb" or not seekable:
            if int(PYARROW_VERSION.split(".")[0]) >= 4:
                # disable compression auto-detection
                _kwargs["compression"] = None
        stream = method(path, **_kwargs)

        return ArrowFile(self, stream, path, mode, block_size, **kwargs)

    @wrap_exceptions
    def mkdir(self, path, create_parents=True, **kwargs):
        path = self._strip_protocol(path)
        if create_parents:
            self.makedirs(path, exist_ok=True)
        else:
            self.fs.create_dir(path, recursive=False)

    @wrap_exceptions
    def makedirs(self, path, exist_ok=False):
        path = self._strip_protocol(path)
        self.fs.create_dir(path, recursive=True)

    @wrap_exceptions
    def rmdir(self, path):
        path = self._strip_protocol(path)
        self.fs.delete_dir(path)

    @wrap_exceptions
    def modified(self, path):
        path = self._strip_protocol(path)
        return self.fs.get_file_info(path).mtime

    def cat_file(self, path, start=None, end=None, **kwargs):
        # Random access requires a seekable handle; reads from the start can
        # use the cheaper forward-only stream.
        kwargs["seekable"] = start not in [None, 0]
        # BUGFIX: forward start/end instead of discarding them, so ranged
        # reads return the requested slice rather than the whole file.
        return super().cat_file(path, start=start, end=end, **kwargs)

    def get_file(self, rpath, lpath, **kwargs):
        # Whole-file download never needs random access.
        kwargs["seekable"] = False
        super().get_file(rpath, lpath, **kwargs)
211
+
212
+
213
@mirror_from(
    "stream",
    [
        "read", "seek", "tell", "write", "readable",
        "writable", "close", "size", "seekable",
    ],
)
class ArrowFile(io.IOBase):
    """File-like object whose I/O methods are mirrored onto the wrapped
    pyarrow stream (see the ``mirror_from`` decorator above)."""

    def __init__(self, fs, stream, path, mode, block_size=None, **kwargs):
        self.fs = fs
        self.stream = stream
        self.path = path
        self.mode = mode
        # keep both spellings for compatibility with fsspec callers
        self.blocksize = self.block_size = block_size
        self.kwargs = kwargs

    def __enter__(self):
        return self

    def __exit__(self, *args):
        # close() is mirrored onto the underlying pyarrow stream
        return self.close()
243
+
244
+
245
class HadoopFileSystem(ArrowFSWrapper):
    """A wrapper on top of the pyarrow.fs.HadoopFileSystem
    to connect its interface with fsspec"""

    protocol = "hdfs"

    def __init__(
        self,
        host="default",
        port=0,
        user=None,
        kerb_ticket=None,
        replication=3,
        extra_conf=None,
        **kwargs,
    ):
        """
        Parameters
        ----------
        host: str
            Hostname, IP or "default" to try to read from Hadoop config
        port: int
            Port to connect on, or default from Hadoop config if 0
        user: str or None
            If given, connect as this username
        kerb_ticket: str or None
            If given, use this ticket for authentication
        replication: int
            set replication factor of file for write operations. default value is 3.
        extra_conf: None or dict
            Passed on to HadoopFileSystem
        """
        from pyarrow.fs import HadoopFileSystem

        wrapped = HadoopFileSystem(
            host=host,
            port=port,
            user=user,
            kerb_ticket=kerb_ticket,
            replication=replication,
            extra_conf=extra_conf,
        )
        super().__init__(fs=wrapped, **kwargs)

    @staticmethod
    def _get_kwargs_from_urls(path):
        """Extract constructor kwargs (host/user/port/replication) from a URL."""
        ops = infer_storage_options(path)
        out = {}
        # map url components onto constructor argument names
        for src_key, dst_key in (("host", "host"), ("username", "user"), ("port", "port")):
            if ops.get(src_key, None):
                out[dst_key] = ops[src_key]
        if ops.get("url_query", None):
            queries = parse_qs(ops["url_query"])
            if queries.get("replication", None):
                out["replication"] = int(queries["replication"][0])
        return out
tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/asyn_wrapper.py ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import asyncio
2
+ import functools
3
+ import inspect
4
+
5
+ import fsspec
6
+ from fsspec.asyn import AsyncFileSystem, running_async
7
+
8
+
9
+ def async_wrapper(func, obj=None, semaphore=None):
10
+ """
11
+ Wraps a synchronous function to make it awaitable.
12
+
13
+ Parameters
14
+ ----------
15
+ func : callable
16
+ The synchronous function to wrap.
17
+ obj : object, optional
18
+ The instance to bind the function to, if applicable.
19
+ semaphore : asyncio.Semaphore, optional
20
+ A semaphore to limit concurrent calls.
21
+
22
+ Returns
23
+ -------
24
+ coroutine
25
+ An awaitable version of the function.
26
+ """
27
+
28
+ @functools.wraps(func)
29
+ async def wrapper(*args, **kwargs):
30
+ if semaphore:
31
+ async with semaphore:
32
+ return await asyncio.to_thread(func, *args, **kwargs)
33
+ return await asyncio.to_thread(func, *args, **kwargs)
34
+
35
+ return wrapper
36
+
37
+
38
class AsyncFileSystemWrapper(AsyncFileSystem):
    """
    A wrapper class to convert a synchronous filesystem into an asynchronous one.

    This class takes an existing synchronous filesystem implementation and wraps all
    its methods to provide an asynchronous interface.

    Parameters
    ----------
    sync_fs : AbstractFileSystem
        The synchronous filesystem instance to wrap.
    """

    protocol = "asyncwrapper", "async_wrapper"
    cachable = False

    def __init__(
        self,
        fs=None,
        asynchronous=None,
        target_protocol=None,
        target_options=None,
        semaphore=None,
        max_concurrent_tasks=None,
        **kwargs,
    ):
        if asynchronous is None:
            # default to async mode when constructed inside a running loop
            asynchronous = running_async()
        super().__init__(asynchronous=asynchronous, **kwargs)
        if fs is not None:
            self.sync_fs = fs
        else:
            # build the wrapped filesystem from protocol/options instead
            self.sync_fs = fsspec.filesystem(target_protocol, **target_options)
        self.protocol = self.sync_fs.protocol
        self.semaphore = semaphore
        self._wrap_all_sync_methods()

    @property
    def fsid(self):
        return f"async_{self.sync_fs.fsid}"

    def _wrap_all_sync_methods(self):
        """
        Wrap all synchronous methods of the underlying filesystem with asynchronous versions.
        """
        skip = {"open"}
        for name in dir(self.sync_fs):
            if name.startswith("_") or name in skip:
                continue

            # use getattr_static so properties are not evaluated here
            static_attr = inspect.getattr_static(self.sync_fs, name)
            if isinstance(static_attr, property):
                continue

            candidate = getattr(self.sync_fs, name)
            if callable(candidate) and not inspect.iscoroutinefunction(candidate):
                wrapped = async_wrapper(candidate, obj=self, semaphore=self.semaphore)
                # AsyncFileSystem dispatches sync names to "_"-prefixed
                # coroutine implementations
                setattr(self, f"_{name}", wrapped)

    @classmethod
    def wrap_class(cls, sync_fs_class):
        """
        Create a new class that can be used to instantiate an AsyncFileSystemWrapper
        with lazy instantiation of the underlying synchronous filesystem.

        Parameters
        ----------
        sync_fs_class : type
            The class of the synchronous filesystem to wrap.

        Returns
        -------
        type
            A new class that wraps the provided synchronous filesystem class.
        """

        class GeneratedAsyncFileSystemWrapper(cls):
            def __init__(self, *args, **kwargs):
                super().__init__(sync_fs_class(*args, **kwargs))

        GeneratedAsyncFileSystemWrapper.__name__ = f"Async{sync_fs_class.__name__}Wrapper"
        return GeneratedAsyncFileSystemWrapper
tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/cache_mapper.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import abc
4
+ import hashlib
5
+
6
+ from fsspec.implementations.local import make_path_posix
7
+
8
+
9
class AbstractCacheMapper(abc.ABC):
    """Abstract super-class for mappers from remote URLs to local cached
    basenames.
    """

    @abc.abstractmethod
    def __call__(self, path: str) -> str: ...

    def __eq__(self, other: object) -> bool:
        # Two mappers are interchangeable exactly when they share a class;
        # subclasses carrying configuration must extend this check.
        return isinstance(other, type(self))

    def __hash__(self) -> int:
        # Must stay consistent with __eq__: hash only on the class.
        return hash(type(self))
26
+
27
+
28
class BasenameCacheMapper(AbstractCacheMapper):
    """Cache mapper that uses the basename of the remote URL and a fixed number
    of directory levels above this.

    The default is zero directory levels, meaning different paths with the same
    basename will have the same cached basename.
    """

    def __init__(self, directory_levels: int = 0):
        if directory_levels < 0:
            raise ValueError(
                "BasenameCacheMapper requires zero or positive directory_levels"
            )
        self.directory_levels = directory_levels
        # Separator for directories when encoded as strings.
        self._separator = "_@_"

    def __call__(self, path: str) -> str:
        posix_path = make_path_posix(path)
        head, *tail = posix_path.rsplit("/", self.directory_levels + 1)
        # An empty tail means no separator was found: plain filename.
        return self._separator.join(tail) if tail else head

    def __eq__(self, other: object) -> bool:
        # super().__eq__ guarantees *other* is the same class before we
        # touch its directory_levels attribute.
        return super().__eq__(other) and self.directory_levels == other.directory_levels

    def __hash__(self) -> int:
        return super().__hash__() ^ hash(self.directory_levels)
59
+
60
+
61
class HashCacheMapper(AbstractCacheMapper):
    """Cache mapper that uses a hash of the remote URL."""

    def __call__(self, path: str) -> str:
        # sha256 hex digest: filesystem-safe, fixed-length, collision-resistant
        return hashlib.sha256(path.encode()).hexdigest()
66
+
67
+
68
def create_cache_mapper(same_names: bool) -> AbstractCacheMapper:
    """Factory method to create cache mapper for backward compatibility with
    ``CachingFileSystem`` constructor using ``same_names`` kwarg.
    """
    # same_names=True preserves basenames; otherwise hash the whole URL
    return BasenameCacheMapper() if same_names else HashCacheMapper()
tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/cache_metadata.py ADDED
@@ -0,0 +1,233 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import os
4
+ import pickle
5
+ import time
6
+ from typing import TYPE_CHECKING
7
+
8
+ from fsspec.utils import atomic_write
9
+
10
+ try:
11
+ import ujson as json
12
+ except ImportError:
13
+ if not TYPE_CHECKING:
14
+ import json
15
+
16
+ if TYPE_CHECKING:
17
+ from collections.abc import Iterator
18
+ from typing import Any, Literal
19
+
20
+ from typing_extensions import TypeAlias
21
+
22
+ from .cached import CachingFileSystem
23
+
24
+ Detail: TypeAlias = dict[str, Any]
25
+
26
+
27
class CacheMetadata:
    """Cache metadata.

    All reading and writing of cache metadata is performed by this class,
    accessing the cached files and blocks is not.

    Metadata is stored in a single file per storage directory in JSON format.
    For backward compatibility, also reads metadata stored in pickle format
    which is converted to JSON when next saved.
    """

    def __init__(self, storage: list[str]):
        """

        Parameters
        ----------
        storage: list[str]
            Directories containing cached files, must be at least one. Metadata
            is stored in the last of these directories by convention.
        """
        if not storage:
            raise ValueError("CacheMetadata expects at least one storage location")

        self._storage = storage
        # One metadata dict per storage location, in the same order as
        # self._storage; only the last entry is writable.
        self.cached_files: list[Detail] = [{}]

        # Private attribute to force saving of metadata in pickle format rather than
        # JSON for use in tests to confirm can read both pickle and JSON formats.
        self._force_save_pickle = False

    def _load(self, fn: str) -> Detail:
        """Low-level function to load metadata from specific file"""
        try:
            with open(fn, "r") as f:
                loaded = json.load(f)
        except ValueError:
            # Not valid JSON: fall back to the legacy pickle format.
            with open(fn, "rb") as f:
                loaded = pickle.load(f)
        for c in loaded.values():
            # JSON serializes "blocks" as a list; in memory it is a set
            # (or True when the whole file is cached).
            if isinstance(c.get("blocks"), list):
                c["blocks"] = set(c["blocks"])
        return loaded

    def _save(self, metadata_to_save: Detail, fn: str) -> None:
        """Low-level function to save metadata to specific file"""
        if self._force_save_pickle:
            with atomic_write(fn) as f:
                pickle.dump(metadata_to_save, f)
        else:
            # atomic_write avoids a partially-written metadata file if the
            # process dies mid-save.
            with atomic_write(fn, mode="w") as f:
                json.dump(metadata_to_save, f)

    def _scan_locations(
        self, writable_only: bool = False
    ) -> Iterator[tuple[str, str, bool]]:
        """Yield locations (filenames) where metadata is stored, and whether
        writable or not.

        Parameters
        ----------
        writable: bool
            Set to True to only yield writable locations.

        Returns
        -------
        Yields (str, str, bool)
        """
        n = len(self._storage)
        for i, storage in enumerate(self._storage):
            # Only the last storage location is writable, by convention.
            writable = i == n - 1
            if writable_only and not writable:
                continue
            yield os.path.join(storage, "cache"), storage, writable

    def check_file(
        self, path: str, cfs: CachingFileSystem | None
    ) -> Literal[False] | tuple[Detail, str]:
        """If path is in cache return its details, otherwise return ``False``.

        If the optional CachingFileSystem is specified then it is used to
        perform extra checks to reject possible matches, such as if they are
        too old.
        """
        for (fn, base, _), cache in zip(self._scan_locations(), self.cached_files):
            if path not in cache:
                continue
            detail = cache[path].copy()

            if cfs is not None:
                if cfs.check_files and detail["uid"] != cfs.fs.ukey(path):
                    # Wrong file as determined by hash of file properties
                    continue
                if cfs.expiry and time.time() - detail["time"] > cfs.expiry:
                    # Cached file has expired
                    continue

            fn = os.path.join(base, detail["fn"])
            if os.path.exists(fn):
                return detail, fn
        return False

    def clear_expired(self, expiry_time: int) -> tuple[list[str], bool]:
        """Remove expired metadata from the cache.

        Returns names of files corresponding to expired metadata and a boolean
        flag indicating whether the writable cache is empty. Caller is
        responsible for deleting the expired files.
        """
        expired_files = []
        # Iterate over a copy so entries can be popped during iteration.
        for path, detail in self.cached_files[-1].copy().items():
            if time.time() - detail["time"] > expiry_time:
                fn = detail.get("fn", "")
                if not fn:
                    raise RuntimeError(
                        f"Cache metadata does not contain 'fn' for {path}"
                    )
                fn = os.path.join(self._storage[-1], fn)
                expired_files.append(fn)
                self.cached_files[-1].pop(path)

        if self.cached_files[-1]:
            cache_path = os.path.join(self._storage[-1], "cache")
            self._save(self.cached_files[-1], cache_path)

        # If everything expired, the caller may remove the whole directory.
        writable_cache_empty = not self.cached_files[-1]
        return expired_files, writable_cache_empty

    def load(self) -> None:
        """Load all metadata from disk and store in ``self.cached_files``"""
        cached_files = []
        for fn, _, _ in self._scan_locations():
            if os.path.exists(fn):
                # TODO: consolidate blocks here
                cached_files.append(self._load(fn))
            else:
                cached_files.append({})
        self.cached_files = cached_files or [{}]

    def on_close_cached_file(self, f: Any, path: str) -> None:
        """Perform side-effect actions on closing a cached file.

        The actual closing of the file is the responsibility of the caller.
        """
        # File must be writeble, so in self.cached_files[-1]
        c = self.cached_files[-1][path]
        # Once every block has been fetched, collapse the block set to True
        # so future opens can use the plain local copy.
        if c["blocks"] is not True and len(c["blocks"]) * f.blocksize >= f.size:
            c["blocks"] = True

    def pop_file(self, path: str) -> str | None:
        """Remove metadata of cached file.

        If path is in the cache, return the filename of the cached file,
        otherwise return ``None``. Caller is responsible for deleting the
        cached file.
        """
        details = self.check_file(path, None)
        if not details:
            return None
        _, fn = details
        if fn.startswith(self._storage[-1]):
            self.cached_files[-1].pop(path)
            self.save()
        else:
            # Entry lives in a read-only (non-last) storage location.
            raise PermissionError(
                "Can only delete cached file in last, writable cache location"
            )
        return fn

    def save(self) -> None:
        """Save metadata to disk"""
        for (fn, _, writable), cache in zip(self._scan_locations(), self.cached_files):
            if not writable:
                continue

            if os.path.exists(fn):
                # Merge with what another process may have written since load.
                cached_files = self._load(fn)
                for k, c in cached_files.items():
                    if k in cache:
                        if c["blocks"] is True or cache[k]["blocks"] is True:
                            c["blocks"] = True
                        else:
                            # self.cached_files[*][*]["blocks"] must continue to
                            # point to the same set object so that updates
                            # performed by MMapCache are propagated back to
                            # self.cached_files.
                            blocks = cache[k]["blocks"]
                            blocks.update(c["blocks"])
                            c["blocks"] = blocks
                        c["time"] = max(c["time"], cache[k]["time"])
                        c["uid"] = cache[k]["uid"]

                # Files can be added to cache after it was written once
                for k, c in cache.items():
                    if k not in cached_files:
                        cached_files[k] = c
            else:
                cached_files = cache
            # Write a copy with sets converted to lists (JSON-serializable),
            # keeping the in-memory set objects untouched.
            cache = {k: v.copy() for k, v in cached_files.items()}
            for c in cache.values():
                if isinstance(c["blocks"], set):
                    c["blocks"] = list(c["blocks"])
            self._save(cache, fn)
            self.cached_files[-1] = cached_files

    def update_file(self, path: str, detail: Detail) -> None:
        """Update metadata for specific file in memory, do not save"""
        self.cached_files[-1][path] = detail
tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/cached.py ADDED
@@ -0,0 +1,998 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import inspect
4
+ import logging
5
+ import os
6
+ import tempfile
7
+ import time
8
+ import weakref
9
+ from shutil import rmtree
10
+ from typing import TYPE_CHECKING, Any, Callable, ClassVar
11
+
12
+ from fsspec import AbstractFileSystem, filesystem
13
+ from fsspec.callbacks import DEFAULT_CALLBACK
14
+ from fsspec.compression import compr
15
+ from fsspec.core import BaseCache, MMapCache
16
+ from fsspec.exceptions import BlocksizeMismatchError
17
+ from fsspec.implementations.cache_mapper import create_cache_mapper
18
+ from fsspec.implementations.cache_metadata import CacheMetadata
19
+ from fsspec.implementations.local import LocalFileSystem
20
+ from fsspec.spec import AbstractBufferedFile
21
+ from fsspec.transaction import Transaction
22
+ from fsspec.utils import infer_compression
23
+
24
+ if TYPE_CHECKING:
25
+ from fsspec.implementations.cache_mapper import AbstractCacheMapper
26
+
27
+ logger = logging.getLogger("fsspec.cached")
28
+
29
+
30
class WriteCachedTransaction(Transaction):
    """Transaction that uploads locally buffered files on commit."""

    def complete(self, commit=True):
        """Finish the transaction; upload all buffered files if committing."""
        remote_paths = [f.path for f in self.files]
        local_paths = [f.fn for f in self.files]
        if commit:
            self.fs.put(local_paths, remote_paths)
        self.files.clear()
        self.fs._intrans = False
        self.fs._transaction = None
        self.fs = None  # break cycle
40
+
41
+
42
class CachingFileSystem(AbstractFileSystem):
    """Locally caching filesystem, layer over any other FS

    This class implements chunk-wise local storage of remote files, for quick
    access after the initial download. The files are stored in a given
    directory with hashes of URLs for the filenames. If no directory is given,
    a temporary one is used, which should be cleaned up by the OS after the
    process ends. The files themselves are sparse (as implemented in
    :class:`~fsspec.caching.MMapCache`), so only the data which is accessed
    takes up space.

    Restrictions:

    - the block-size must be the same for each access of a given file, unless
      all blocks of the file have already been read
    - caching can only be applied to file-systems which produce files
      derived from fsspec.spec.AbstractBufferedFile ; LocalFileSystem is also
      allowed, for testing
    """

    protocol: ClassVar[str | tuple[str, ...]] = ("blockcache", "cached")

    def __init__(
        self,
        target_protocol=None,
        cache_storage="TMP",
        cache_check=10,
        check_files=False,
        expiry_time=604800,
        target_options=None,
        fs=None,
        same_names: bool | None = None,
        compression=None,
        cache_mapper: AbstractCacheMapper | None = None,
        **kwargs,
    ):
        """

        Parameters
        ----------
        target_protocol: str (optional)
            Target filesystem protocol. Provide either this or ``fs``.
        cache_storage: str or list(str)
            Location to store files. If "TMP", this is a temporary directory,
            and will be cleaned up by the OS when this process ends (or later).
            If a list, each location will be tried in the order given, but
            only the last will be considered writable.
        cache_check: int
            Number of seconds between reload of cache metadata
        check_files: bool
            Whether to explicitly see if the UID of the remote file matches
            the stored one before using. Warning: some file systems such as
            HTTP cannot reliably give a unique hash of the contents of some
            path, so be sure to set this option to False.
        expiry_time: int
            The time in seconds after which a local copy is considered useless.
            Set to falsy to prevent expiry. The default is equivalent to one
            week.
        target_options: dict or None
            Passed to the instantiation of the FS, if fs is None.
        fs: filesystem instance
            The target filesystem to run against. Provide this or ``protocol``.
        same_names: bool (optional)
            By default, target URLs are hashed using a ``HashCacheMapper`` so
            that files from different backends with the same basename do not
            conflict. If this argument is ``true``, a ``BasenameCacheMapper``
            is used instead. Other cache mapper options are available by using
            the ``cache_mapper`` keyword argument. Only one of this and
            ``cache_mapper`` should be specified.
        compression: str (optional)
            To decompress on download. Can be 'infer' (guess from the URL name),
            one of the entries in ``fsspec.compression.compr``, or None for no
            decompression.
        cache_mapper: AbstractCacheMapper (optional)
            The object use to map from original filenames to cached filenames.
            Only one of this and ``same_names`` should be specified.
        """
        super().__init__(**kwargs)
        if fs is None and target_protocol is None:
            raise ValueError(
                "Please provide filesystem instance(fs) or target_protocol"
            )
        if not (fs is None) ^ (target_protocol is None):
            raise ValueError(
                "Both filesystems (fs) and target_protocol may not be both given."
            )
        if cache_storage == "TMP":
            tempdir = tempfile.mkdtemp()
            storage = [tempdir]
            # Remove the temporary directory when this instance is collected.
            weakref.finalize(self, self._remove_tempdir, tempdir)
        else:
            if isinstance(cache_storage, str):
                storage = [cache_storage]
            else:
                storage = cache_storage
        # Only the last storage location is writable, by convention.
        os.makedirs(storage[-1], exist_ok=True)
        self.storage = storage
        self.kwargs = target_options or {}
        self.cache_check = cache_check
        self.check_files = check_files
        self.expiry = expiry_time
        self.compression = compression

        # Size of cache in bytes. If None then the size is unknown and will be
        # recalculated the next time cache_size() is called. On writes to the
        # cache this is reset to None.
        self._cache_size = None

        if same_names is not None and cache_mapper is not None:
            raise ValueError(
                "Cannot specify both same_names and cache_mapper in "
                "CachingFileSystem.__init__"
            )
        if cache_mapper is not None:
            self._mapper = cache_mapper
        else:
            self._mapper = create_cache_mapper(
                same_names if same_names is not None else False
            )

        self.target_protocol = (
            target_protocol
            if isinstance(target_protocol, str)
            else (fs.protocol if isinstance(fs.protocol, str) else fs.protocol[0])
        )
        self._metadata = CacheMetadata(self.storage)
        self.load_cache()
        self.fs = fs if fs is not None else filesystem(target_protocol, **self.kwargs)

        def _strip_protocol(path):
            # acts as a method, since each instance has a difference target
            return self.fs._strip_protocol(type(self)._strip_protocol(path))

        self._strip_protocol: Callable = _strip_protocol

    @staticmethod
    def _remove_tempdir(tempdir):
        # Best-effort cleanup; failures (e.g. dir already gone) are ignored.
        try:
            rmtree(tempdir)
        except Exception:
            pass

    def _mkcache(self):
        # Ensure the writable cache directory exists.
        os.makedirs(self.storage[-1], exist_ok=True)

    def cache_size(self):
        """Return size of cache in bytes.

        If more than one cache directory is in use, only the size of the last
        one (the writable cache directory) is returned.
        """
        if self._cache_size is None:
            cache_dir = self.storage[-1]
            self._cache_size = filesystem("file").du(cache_dir, withdirs=True)
        return self._cache_size

    def load_cache(self):
        """Read set of stored blocks from file"""
        self._metadata.load()
        self._mkcache()
        self.last_cache = time.time()

    def save_cache(self):
        """Save set of stored blocks from file"""
        self._mkcache()
        self._metadata.save()
        self.last_cache = time.time()
        # Cache contents changed, so the cached size is stale.
        self._cache_size = None

    def _check_cache(self):
        """Reload caches if time elapsed or any disappeared"""
        self._mkcache()
        if not self.cache_check:
            # explicitly told not to bother checking
            return
        timecond = time.time() - self.last_cache > self.cache_check
        existcond = all(os.path.exists(storage) for storage in self.storage)
        if timecond or not existcond:
            self.load_cache()

    def _check_file(self, path):
        """Is path in cache and still valid"""
        path = self._strip_protocol(path)
        self._check_cache()
        return self._metadata.check_file(path, self)

    def clear_cache(self):
        """Remove all files and metadata from the cache

        In the case of multiple cache locations, this clears only the last one,
        which is assumed to be the read/write one.
        """
        rmtree(self.storage[-1])
        self.load_cache()
        self._cache_size = None

    def clear_expired_cache(self, expiry_time=None):
        """Remove all expired files and metadata from the cache

        In the case of multiple cache locations, this clears only the last one,
        which is assumed to be the read/write one.

        Parameters
        ----------
        expiry_time: int
            The time in seconds after which a local copy is considered useless.
            If not defined the default is equivalent to the attribute from the
            file caching instantiation.
        """

        if not expiry_time:
            expiry_time = self.expiry

        self._check_cache()

        expired_files, writable_cache_empty = self._metadata.clear_expired(expiry_time)
        for fn in expired_files:
            if os.path.exists(fn):
                os.remove(fn)

        if writable_cache_empty:
            # Everything expired: drop the whole directory and start fresh.
            rmtree(self.storage[-1])
            self.load_cache()

        self._cache_size = None

    def pop_from_cache(self, path):
        """Remove cached version of given file

        Deletes local copy of the given (remote) path. If it is found in a cache
        location which is not the last, it is assumed to be read-only, and
        raises PermissionError
        """
        path = self._strip_protocol(path)
        fn = self._metadata.pop_file(path)
        if fn is not None:
            os.remove(fn)
        self._cache_size = None

    def _open(
        self,
        path,
        mode="rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        """Wrap the target _open

        If the whole file exists in the cache, just open it locally and
        return that.

        Otherwise, open the file on the target FS, and make it have a mmap
        cache pointing to the location which we determine, in our cache.
        The ``blocks`` instance is shared, so as the mmap cache instance
        updates, so does the entry in our ``cached_files`` attribute.
        We monkey-patch this file, so that when it closes, we call
        ``close_and_update`` to save the state of the blocks.
        """
        path = self._strip_protocol(path)

        path = self.fs._strip_protocol(path)
        if "r" not in mode:
            # Writes are passed straight through to the target filesystem.
            return self.fs._open(
                path,
                mode=mode,
                block_size=block_size,
                autocommit=autocommit,
                cache_options=cache_options,
                **kwargs,
            )
        detail = self._check_file(path)
        if detail:
            # file is in cache
            detail, fn = detail
            hash, blocks = detail["fn"], detail["blocks"]
            if blocks is True:
                # stored file is complete
                logger.debug("Opening local copy of %s", path)
                return open(fn, mode)
            # TODO: action where partial file exists in read-only cache
            logger.debug("Opening partially cached copy of %s", path)
        else:
            hash = self._mapper(path)
            fn = os.path.join(self.storage[-1], hash)
            blocks = set()
            detail = {
                "original": path,
                "fn": hash,
                "blocks": blocks,
                "time": time.time(),
                "uid": self.fs.ukey(path),
            }
            self._metadata.update_file(path, detail)
            logger.debug("Creating local sparse file for %s", path)

        # explicitly submitting the size to the open call will avoid extra
        # operations when opening. This is particularly relevant
        # for any file that is read over a network, e.g. S3.
        size = detail.get("size")

        # call target filesystems open
        self._mkcache()
        f = self.fs._open(
            path,
            mode=mode,
            block_size=block_size,
            autocommit=autocommit,
            cache_options=cache_options,
            cache_type="none",
            size=size,
            **kwargs,
        )

        # set size if not already set
        if size is None:
            detail["size"] = f.size
            self._metadata.update_file(path, detail)

        if self.compression:
            comp = (
                infer_compression(path)
                if self.compression == "infer"
                else self.compression
            )
            f = compr[comp](f, mode="rb")
        if "blocksize" in detail:
            if detail["blocksize"] != f.blocksize:
                # MMapCache block bookkeeping only makes sense at a fixed size.
                raise BlocksizeMismatchError(
                    f"Cached file must be reopened with same block"
                    f" size as original (old: {detail['blocksize']},"
                    f" new {f.blocksize})"
                )
        else:
            detail["blocksize"] = f.blocksize

        def _fetch_ranges(ranges):
            return self.fs.cat_ranges(
                [path] * len(ranges),
                [r[0] for r in ranges],
                [r[1] for r in ranges],
                **kwargs,
            )

        # Range-fetch optimization is not valid through a decompressor.
        multi_fetcher = None if self.compression else _fetch_ranges
        f.cache = MMapCache(
            f.blocksize, f._fetch_range, f.size, fn, blocks, multi_fetcher=multi_fetcher
        )
        close = f.close
        # Persist block metadata on close via close_and_update.
        f.close = lambda: self.close_and_update(f, close)
        self.save_cache()
        return f

    def _parent(self, path):
        return self.fs._parent(path)

    def hash_name(self, path: str, *args: Any) -> str:
        # Kept for backward compatibility with downstream libraries.
        # Ignores extra arguments, previously same_name boolean.
        return self._mapper(path)

    def close_and_update(self, f, close):
        """Called when a file is closing, so store the set of blocks"""
        if f.closed:
            return
        path = self._strip_protocol(f.path)
        self._metadata.on_close_cached_file(f, path)
        try:
            logger.debug("going to save")
            self.save_cache()
            logger.debug("saved")
        except OSError:
            logger.debug("Cache saving failed while closing file")
        except NameError:
            logger.debug("Cache save failed due to interpreter shutdown")
        close()
        f.closed = True

    def ls(self, path, detail=True):
        return self.fs.ls(path, detail)

    def __getattribute__(self, item):
        # Dispatch: names in this set resolve to methods defined on this
        # class; everything else falls through to the wrapped filesystem.
        if item in {
            "load_cache",
            "_open",
            "save_cache",
            "close_and_update",
            "__init__",
            "__getattribute__",
            "__reduce__",
            "_make_local_details",
            "open",
            "cat",
            "cat_file",
            "_cat_file",
            "cat_ranges",
            "_cat_ranges",
            "get",
            "read_block",
            "tail",
            "head",
            "info",
            "ls",
            "exists",
            "isfile",
            "isdir",
            "_check_file",
            "_check_cache",
            "_mkcache",
            "clear_cache",
            "clear_expired_cache",
            "pop_from_cache",
            "local_file",
            "_paths_from_path",
            "get_mapper",
            "open_many",
            "commit_many",
            "hash_name",
            "__hash__",
            "__eq__",
            "to_json",
            "to_dict",
            "cache_size",
            "pipe_file",
            "pipe",
            "start_transaction",
            "end_transaction",
        }:
            # all the methods defined in this class. Note `open` here, since
            # it calls `_open`, but is actually in superclass
            return lambda *args, **kw: getattr(type(self), item).__get__(self)(
                *args, **kw
            )
        if item in ["__reduce_ex__"]:
            raise AttributeError
        if item in ["transaction"]:
            # property
            return type(self).transaction.__get__(self)
        if item in {"_cache", "transaction_type", "protocol"}:
            # class attributes
            return getattr(type(self), item)
        if item == "__class__":
            return type(self)
        d = object.__getattribute__(self, "__dict__")
        fs = d.get("fs", None)  # fs is not immediately defined
        if item in d:
            return d[item]
        elif fs is not None:
            if item in fs.__dict__:
                # attribute of instance
                return fs.__dict__[item]
            # attributed belonging to the target filesystem
            cls = type(fs)
            m = getattr(cls, item)
            if (inspect.isfunction(m) or inspect.isdatadescriptor(m)) and (
                not hasattr(m, "__self__") or m.__self__ is None
            ):
                # instance method
                return m.__get__(fs, cls)
            return m  # class method or attribute
        else:
            # attributes of the superclass, while target is being set up
            return super().__getattribute__(item)

    def __eq__(self, other):
        """Test for equality."""
        if self is other:
            return True
        if not isinstance(other, type(self)):
            return False
        return (
            self.storage == other.storage
            and self.kwargs == other.kwargs
            and self.cache_check == other.cache_check
            and self.check_files == other.check_files
            and self.expiry == other.expiry
            and self.compression == other.compression
            and self._mapper == other._mapper
            and self.target_protocol == other.target_protocol
        )

    def __hash__(self):
        """Calculate hash."""
        return (
            hash(tuple(self.storage))
            ^ hash(str(self.kwargs))
            ^ hash(self.cache_check)
            ^ hash(self.check_files)
            ^ hash(self.expiry)
            ^ hash(self.compression)
            ^ hash(self._mapper)
            ^ hash(self.target_protocol)
        )
536
+
537
+
538
class WholeFileCacheFileSystem(CachingFileSystem):
    """Caches whole remote files on first access

    This class is intended as a layer over any other file system, and
    will make a local copy of each file accessed, so that all subsequent
    reads are local. This is similar to ``CachingFileSystem``, but without
    the block-wise functionality and so can work even when sparse files
    are not allowed. See its docstring for definition of the init
    arguments.

    The class still needs access to the remote store for listing files,
    and may refresh cached files.
    """

    protocol = "filecache"
    local_file = True

    def open_many(self, open_files, **kwargs):
        """Open several files at once, downloading any uncached ones in a
        single batch ``get`` call.
        """
        paths = [of.path for of in open_files]
        if "r" in open_files.mode:
            self._mkcache()
        else:
            # Write mode: buffer into local temp files; uploaded on commit.
            return [
                LocalTempFile(
                    self.fs,
                    path,
                    mode=open_files.mode,
                    fn=os.path.join(self.storage[-1], self._mapper(path)),
                    **kwargs,
                )
                for path in paths
            ]

        if self.compression:
            raise NotImplementedError
        details = [self._check_file(sp) for sp in paths]
        downpath = [p for p, d in zip(paths, details) if not d]
        downfn0 = [
            os.path.join(self.storage[-1], self._mapper(p))
            for p, d in zip(paths, details)
        ]  # keep these path names for opening later
        downfn = [fn for fn, d in zip(downfn0, details) if not d]
        if downpath:
            # skip if all files are already cached and up to date
            self.fs.get(downpath, downfn)

            # update metadata - only happens when downloads are successful
            newdetail = [
                {
                    "original": path,
                    "fn": self._mapper(path),
                    "blocks": True,
                    "time": time.time(),
                    "uid": self.fs.ukey(path),
                }
                for path in downpath
            ]
            for path, detail in zip(downpath, newdetail):
                self._metadata.update_file(path, detail)
            self.save_cache()

        def firstpart(fn):
            # helper to adapt both whole-file and simple-cache
            return fn[1] if isinstance(fn, tuple) else fn

        return [
            open(firstpart(fn0) if fn0 else fn1, mode=open_files.mode)
            for fn0, fn1 in zip(details, downfn0)
        ]

    def commit_many(self, open_files):
        """Upload all buffered temp files to the remote, then clean up."""
        self.fs.put([f.fn for f in open_files], [f.path for f in open_files])
        [f.close() for f in open_files]
        for f in open_files:
            # in case autocommit is off, and so close did not already delete
            try:
                os.remove(f.name)
            except FileNotFoundError:
                pass
        self._cache_size = None

    def _make_local_details(self, path):
        # Register a whole-file ("blocks": True) metadata entry and return
        # the local filename it will be stored under.
        hash = self._mapper(path)
        fn = os.path.join(self.storage[-1], hash)
        detail = {
            "original": path,
            "fn": hash,
            "blocks": True,
            "time": time.time(),
            "uid": self.fs.ukey(path),
        }
        self._metadata.update_file(path, detail)
        logger.debug("Copying %s to local cache", path)
        return fn

    def cat(
        self,
        path,
        recursive=False,
        on_error="raise",
        callback=DEFAULT_CALLBACK,
        **kwargs,
    ):
        """Fetch contents of path(s), caching whole files locally first."""
        paths = self.expand_path(
            path, recursive=recursive, maxdepth=kwargs.get("maxdepth")
        )
        getpaths = []
        storepaths = []
        fns = []
        out = {}
        for p in paths.copy():
            try:
                detail = self._check_file(p)
                if not detail:
                    # Not cached yet: schedule for batch download below.
                    fn = self._make_local_details(p)
                    getpaths.append(p)
                    storepaths.append(fn)
                else:
                    detail, fn = detail if isinstance(detail, tuple) else (None, detail)
                fns.append(fn)
            except Exception as e:
                if on_error == "raise":
                    raise
                if on_error == "return":
                    out[p] = e
                paths.remove(p)

        if getpaths:
            self.fs.get(getpaths, storepaths)
            self.save_cache()

        callback.set_size(len(paths))
        for p, fn in zip(paths, fns):
            with open(fn, "rb") as f:
                out[p] = f.read()
            callback.relative_update(1)
        # Single non-recursive string input gets the bytes, not a dict.
        if isinstance(path, str) and len(paths) == 1 and recursive is False:
            out = out[paths[0]]
        return out

    def _open(self, path, mode="rb", **kwargs):
        path = self._strip_protocol(path)
        if "r" not in mode:
            hash = self._mapper(path)
            fn = os.path.join(self.storage[-1], hash)
            user_specified_kwargs = {
                k: v
                for k, v in kwargs.items()
                # those kwargs were added by open(), we don't want them
                if k not in ["autocommit", "block_size", "cache_options"]
            }
            return LocalTempFile(self, path, mode=mode, fn=fn, **user_specified_kwargs)
        detail = self._check_file(path)
        if detail:
            detail, fn = detail
            _, blocks = detail["fn"], detail["blocks"]
            if blocks is True:
                logger.debug("Opening local copy of %s", path)

                # In order to support downstream filesystems to be able to
                # infer the compression from the original filename, like
                # the `TarFileSystem`, let's extend the `io.BufferedReader`
                # fileobject protocol by adding a dedicated attribute
                # `original`.
                f = open(fn, mode)
                f.original = detail.get("original")
                return f
            else:
                raise ValueError(
                    f"Attempt to open partially cached file {path}"
                    f" as a wholly cached file"
                )
        else:
            fn = self._make_local_details(path)
        kwargs["mode"] = mode

        # call target filesystems open
        self._mkcache()
        if self.compression:
            with self.fs._open(path, **kwargs) as f, open(fn, "wb") as f2:
                if isinstance(f, AbstractBufferedFile):
                    # want no type of caching if just downloading whole thing
                    f.cache = BaseCache(0, f.cache.fetcher, f.size)
                comp = (
                    infer_compression(path)
                    if self.compression == "infer"
                    else self.compression
                )
                f = compr[comp](f, mode="rb")
                data = True
                # Stream the decompressed content to the local copy in blocks.
                while data:
                    block = getattr(f, "blocksize", 5 * 2**20)
                    data = f.read(block)
                    f2.write(data)
        else:
            self.fs.get_file(path, fn)
        self.save_cache()
        # Re-enter to take the now-fully-cached fast path above.
        return self._open(path, mode)
736
+
737
+
738
class SimpleCacheFileSystem(WholeFileCacheFileSystem):
    """Caches whole remote files on first access

    This class is intended as a layer over any other file system, and
    will make a local copy of each file accessed, so that all subsequent
    reads are local. This implementation only copies whole files, and
    does not keep any metadata about the download time or file details.
    It is therefore safer to use in multi-threaded/concurrent situations.

    This is the only of the caching filesystems that supports write: you will
    be given a real local open file, and upon close and commit, it will be
    uploaded to the target filesystem; the writability or the target URL is
    not checked until that time.

    """

    protocol = "simplecache"
    local_file = True
    transaction_type = WriteCachedTransaction

    def __init__(self, **kwargs):
        kw = kwargs.copy()
        # no metadata is kept, so the parent's expiry/validation features
        # are forced off
        for key in ["cache_check", "expiry_time", "check_files"]:
            kw[key] = False
        super().__init__(**kw)
        for storage in self.storage:
            if not os.path.exists(storage):
                os.makedirs(storage, exist_ok=True)

    def _check_file(self, path):
        """Return the local cache filename for *path*, or None when not cached."""
        self._check_cache()
        sha = self._mapper(path)
        for storage in self.storage:
            fn = os.path.join(storage, sha)
            if os.path.exists(fn):
                return fn

    def save_cache(self):
        # no metadata to persist
        pass

    def load_cache(self):
        # no metadata to load
        pass

    def pipe_file(self, path, value=None, **kwargs):
        if self._intrans:
            # inside a transaction: write via a LocalTempFile so the upload
            # is deferred to transaction commit
            with self.open(path, "wb") as f:
                f.write(value)
        else:
            super().pipe_file(path, value)

    def ls(self, path, detail=True, **kwargs):
        path = self._strip_protocol(path)
        details = []
        try:
            details = self.fs.ls(
                path, detail=True, **kwargs
            ).copy()  # don't edit original!
        except FileNotFoundError as e:
            ex = e
        else:
            ex = None
        if self._intrans:
            # merge in files written during the current (uncommitted)
            # transaction, which the remote does not know about yet
            path1 = path.rstrip("/") + "/"
            for f in self.transaction.files:
                if f.path == path:
                    details.append(
                        {"name": path, "size": f.size or f.tell(), "type": "file"}
                    )
                elif f.path.startswith(path1):
                    if f.path.count("/") == path1.count("/"):
                        # direct child of the listed directory
                        details.append(
                            {"name": f.path, "size": f.size or f.tell(), "type": "file"}
                        )
                    else:
                        # deeper descendant: surface its top-level directory
                        dname = "/".join(f.path.split("/")[: path1.count("/") + 1])
                        details.append({"name": dname, "size": 0, "type": "directory"})
        # only propagate the remote error when transaction files did not
        # provide any entries either
        if ex is not None and not details:
            raise ex
        if detail:
            return details
        return sorted(_["name"] for _ in details)

    def info(self, path, **kwargs):
        path = self._strip_protocol(path)
        if self._intrans:
            # files written in the current transaction exist only locally
            f = [_ for _ in self.transaction.files if _.path == path]
            if f:
                # closed files have their final size on disk; open ones
                # report the current write position
                size = os.path.getsize(f[0].fn) if f[0].closed else f[0].tell()
                return {"name": path, "size": size, "type": "file"}
            f = any(_.path.startswith(path + "/") for _ in self.transaction.files)
            if f:
                return {"name": path, "size": 0, "type": "directory"}
        return self.fs.info(path, **kwargs)

    def pipe(self, path, value=None, **kwargs):
        if isinstance(path, str):
            self.pipe_file(self._strip_protocol(path), value, **kwargs)
        elif isinstance(path, dict):
            for k, v in path.items():
                self.pipe_file(self._strip_protocol(k), v, **kwargs)
        else:
            raise ValueError("path must be str or dict")

    async def _cat_file(self, path, start=None, end=None, **kwargs):
        logger.debug("async cat_file %s", path)
        path = self._strip_protocol(path)
        sha = self._mapper(path)
        fn = self._check_file(path)

        if not fn:
            # cache miss: download the complete file before serving the range
            fn = os.path.join(self.storage[-1], sha)
            await self.fs._get_file(path, fn, **kwargs)

        with open(fn, "rb") as f:  # noqa ASYNC230
            if start:
                f.seek(start)
            size = -1 if end is None else end - f.tell()
            return f.read(size)

    async def _cat_ranges(
        self, paths, starts, ends, max_gap=None, on_error="return", **kwargs
    ):
        logger.debug("async cat ranges %s", paths)
        lpaths = []
        rset = set()  # remote paths already scheduled, to avoid duplicates
        download = []
        rpaths = []
        for p in paths:
            fn = self._check_file(p)
            if fn is None and p not in rset:
                sha = self._mapper(p)
                fn = os.path.join(self.storage[-1], sha)
                download.append(fn)
                rset.add(p)
                rpaths.append(p)
            lpaths.append(fn)
        if download:
            await self.fs._get(rpaths, download, on_error=on_error)

        # all requested ranges are now served from the local copies
        return LocalFileSystem().cat_ranges(
            lpaths, starts, ends, max_gap=max_gap, on_error=on_error, **kwargs
        )

    def cat_ranges(
        self, paths, starts, ends, max_gap=None, on_error="return", **kwargs
    ):
        logger.debug("cat ranges %s", paths)
        lpaths = [self._check_file(p) for p in paths]
        # NOTE(review): _check_file returns a filename or None, never False,
        # so these `l is False` filters appear to never select anything and
        # uncached paths would reach LocalFileSystem as None -- confirm the
        # intended behaviour for cache misses here.
        rpaths = [p for l, p in zip(lpaths, paths) if l is False]
        lpaths = [l for l, p in zip(lpaths, paths) if l is False]
        self.fs.get(rpaths, lpaths)
        paths = [self._check_file(p) for p in paths]
        return LocalFileSystem().cat_ranges(
            paths, starts, ends, max_gap=max_gap, on_error=on_error, **kwargs
        )

    def _open(self, path, mode="rb", **kwargs):
        path = self._strip_protocol(path)
        sha = self._mapper(path)

        if "r" not in mode:
            # write mode: hand back a local temp file, uploaded on commit
            fn = os.path.join(self.storage[-1], sha)
            user_specified_kwargs = {
                k: v
                for k, v in kwargs.items()
                if k not in ["autocommit", "block_size", "cache_options"]
            }  # those were added by open()
            return LocalTempFile(
                self,
                path,
                mode=mode,
                autocommit=not self._intrans,
                fn=fn,
                **user_specified_kwargs,
            )
        fn = self._check_file(path)
        if fn:
            # cache hit: serve the plain local file directly
            return open(fn, mode)

        fn = os.path.join(self.storage[-1], sha)
        logger.debug("Copying %s to local cache", path)
        kwargs["mode"] = mode

        self._mkcache()
        self._cache_size = None
        if self.compression:
            # stream through a decompressor so the cached copy is plain bytes
            with self.fs._open(path, **kwargs) as f, open(fn, "wb") as f2:
                if isinstance(f, AbstractBufferedFile):
                    # want no type of caching if just downloading whole thing
                    f.cache = BaseCache(0, f.cache.fetcher, f.size)
                comp = (
                    infer_compression(path)
                    if self.compression == "infer"
                    else self.compression
                )
                f = compr[comp](f, mode="rb")
                data = True
                while data:
                    block = getattr(f, "blocksize", 5 * 2**20)
                    data = f.read(block)
                    f2.write(data)
        else:
            self.fs.get_file(path, fn)
        # recurse: the cache check will now hit and return the local copy
        return self._open(path, mode)
942
+
943
+
944
class LocalTempFile:
    """A temporary local file, which will be uploaded on commit"""

    def __init__(self, fs, path, fn, mode="wb", autocommit=True, seek=0, **kwargs):
        # fn: local (cache) filename; path: eventual remote destination
        self.fn = fn
        self.fh = open(fn, mode)
        self.mode = mode
        if seek:
            self.fh.seek(seek)
        self.path = path
        self.size = None
        self.fs = fs
        self.closed = False
        self.autocommit = autocommit
        # extra kwargs are forwarded to fs.put() at commit time
        self.kwargs = kwargs

    def __reduce__(self):
        # always open in r+b to allow continuing writing at a location
        return (
            LocalTempFile,
            (self.fs, self.path, self.fn, "r+b", self.autocommit, self.tell()),
        )

    def __enter__(self):
        # NOTE: yields the underlying local file handle, not self, so
        # `with` users interact with a real file object
        return self.fh

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()

    def close(self):
        # idempotent: closing twice (or after commit) is a no-op
        if self.closed:
            return
        self.fh.close()
        self.closed = True
        if self.autocommit:
            self.commit()

    def discard(self):
        """Abandon the write: close and delete the local file without uploading."""
        self.fh.close()
        os.remove(self.fn)

    def commit(self):
        """Upload the local file to the remote target path."""
        self.fs.put(self.fn, self.path, **self.kwargs)
        # we do not delete the local copy, it's still in the cache.

    @property
    def name(self):
        return self.fn

    def __repr__(self) -> str:
        return f"LocalTempFile: {self.path}"

    def __getattr__(self, item):
        # delegate everything else (write/read/tell/...) to the open handle
        return getattr(self.fh, item)
tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/dask.py ADDED
@@ -0,0 +1,152 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import dask
2
+ from distributed.client import Client, _get_global_client
3
+ from distributed.worker import Worker
4
+
5
+ from fsspec import filesystem
6
+ from fsspec.spec import AbstractBufferedFile, AbstractFileSystem
7
+ from fsspec.utils import infer_storage_options
8
+
9
+
10
def _get_client(client):
    """Resolve *client* to a ``distributed.Client`` instance.

    ``None`` falls back to the current global client; an existing ``Client``
    is returned unchanged; anything else (e.g. a connection string) is used
    to construct a new ``Client``.
    """
    if client is None:
        return _get_global_client()
    if isinstance(client, Client):
        return client
    # e.g., connection string
    return Client(client)
18
+
19
+
20
def _in_worker():
    """Return True when executing inside a dask ``Worker`` process."""
    return len(Worker._instances) > 0
22
+
23
+
24
class DaskWorkerFileSystem(AbstractFileSystem):
    """View files accessible to a worker as any other remote file-system

    When instances are run on the worker, uses the real filesystem. When
    run on the client, they call the worker to provide information or data.

    **Warning** this implementation is experimental, and read-only for now.
    """

    def __init__(
        self, target_protocol=None, target_options=None, fs=None, client=None, **kwargs
    ):
        """Wrap a filesystem for use from a dask client or worker.

        Exactly one of ``fs`` (an instantiated filesystem) or
        ``target_protocol`` (with optional ``target_options``) must be given.
        """
        super().__init__(**kwargs)
        # XOR check: exactly one of fs / target_protocol may be provided
        if not (fs is None) ^ (target_protocol is None):
            raise ValueError(
                "Please provide one of filesystem instance (fs) or"
                " target_protocol, not both"
            )
        self.target_protocol = target_protocol
        self.target_options = target_options
        self.worker = None  # True/False once _determine_worker has run
        self.client = client
        self.fs = fs
        self._determine_worker()

    @staticmethod
    def _get_kwargs_from_urls(path):
        """Derive a scheduler address ("host:port") from a URL, if present."""
        so = infer_storage_options(path)
        if "host" in so and "port" in so:
            return {"client": f"{so['host']}:{so['port']}"}
        else:
            return {}

    def _determine_worker(self):
        """Configure for worker-local or client-side (remote) operation."""
        if _in_worker():
            self.worker = True
            if self.fs is None:
                self.fs = filesystem(
                    self.target_protocol, **(self.target_options or {})
                )
        else:
            self.worker = False
            self.client = _get_client(self.client)
            # delayed proxy of self: method calls on rfs execute on a worker
            self.rfs = dask.delayed(self)

    def mkdir(self, *args, **kwargs):
        # on a worker: act directly; on the client: dispatch to a worker
        if self.worker:
            self.fs.mkdir(*args, **kwargs)
        else:
            self.rfs.mkdir(*args, **kwargs).compute()

    def rm(self, *args, **kwargs):
        if self.worker:
            self.fs.rm(*args, **kwargs)
        else:
            self.rfs.rm(*args, **kwargs).compute()

    def copy(self, *args, **kwargs):
        if self.worker:
            self.fs.copy(*args, **kwargs)
        else:
            self.rfs.copy(*args, **kwargs).compute()

    def mv(self, *args, **kwargs):
        if self.worker:
            self.fs.mv(*args, **kwargs)
        else:
            self.rfs.mv(*args, **kwargs).compute()

    def ls(self, *args, **kwargs):
        if self.worker:
            return self.fs.ls(*args, **kwargs)
        else:
            return self.rfs.ls(*args, **kwargs).compute()

    def _open(
        self,
        path,
        mode="rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        if self.worker:
            # local to the data: open through the wrapped filesystem
            return self.fs._open(
                path,
                mode=mode,
                block_size=block_size,
                autocommit=autocommit,
                cache_options=cache_options,
                **kwargs,
            )
        else:
            # on the client: return a proxy file fetching ranges via workers
            return DaskFile(
                fs=self,
                path=path,
                mode=mode,
                block_size=block_size,
                autocommit=autocommit,
                cache_options=cache_options,
                **kwargs,
            )

    def fetch_range(self, path, mode, start, end):
        """Read bytes [start, end) of *path*, executing on a worker if needed."""
        if self.worker:
            with self._open(path, mode) as f:
                f.seek(start)
                return f.read(end - start)
        else:
            return self.rfs.fetch_range(path, mode, start, end).compute()
135
+
136
+
137
class DaskFile(AbstractBufferedFile):
    """Client-side file proxy; byte ranges are fetched through the worker."""

    def __init__(self, mode="rb", **kwargs):
        # the dask proxy is read-only (see DaskWorkerFileSystem warning)
        if mode != "rb":
            raise ValueError('Remote dask files can only be opened in "rb" mode')
        super().__init__(**kwargs)

    def _upload_chunk(self, final=False):
        # read-only: uploads never occur
        pass

    def _initiate_upload(self):
        """Create remote file/upload"""
        pass

    def _fetch_range(self, start, end):
        """Get the specified set of bytes from remote"""
        return self.fs.fetch_range(self.path, self.mode, start, end)
tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/data.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import base64
2
+ import io
3
+ from typing import Optional
4
+ from urllib.parse import unquote
5
+
6
+ from fsspec import AbstractFileSystem
7
+
8
+
9
class DataFileSystem(AbstractFileSystem):
    """A handy decoder for data-URLs

    Example
    -------
    >>> with fsspec.open("data:,Hello%2C%20World%21") as f:
    ...     print(f.read())
    b"Hello, World!"

    See https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URLs
    """

    protocol = "data"

    def __init__(self, **kwargs):
        """No parameters for this filesystem"""
        super().__init__(**kwargs)

    def cat_file(self, path, start=None, end=None, **kwargs):
        """Decode the data-URL payload, optionally sliced to bytes [start:end]."""
        pref, data = path.split(",", 1)
        if pref.endswith("base64"):
            return base64.b64decode(data)[start:end]
        # not base64: the payload is percent-encoded text
        return unquote(data).encode()[start:end]

    def info(self, path, **kwargs):
        """Describe the URL; 'name' is the (still-encoded) payload portion."""
        pref, name = path.split(",", 1)
        data = self.cat_file(path)
        # media type sits between "data:" and the first ";" (e.g. text/plain)
        mime = pref.split(":", 1)[1].split(";", 1)[0]
        return {"name": name, "size": len(data), "type": "file", "mimetype": mime}

    def _open(
        self,
        path,
        mode="rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        if "r" not in mode:
            raise ValueError("Read only filesystem")
        # the whole payload is decoded eagerly into an in-memory buffer
        return io.BytesIO(self.cat_file(path))

    @staticmethod
    def encode(data: bytes, mime: Optional[str] = None):
        """Format the given data into data-URL syntax

        This version always base64 encodes, even when the data is ascii/url-safe.
        """
        return f"data:{mime or ''};base64,{base64.b64encode(data).decode()}"
tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/dbfs.py ADDED
@@ -0,0 +1,496 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import base64
4
+ import urllib
5
+
6
+ import requests
7
+ from requests.adapters import HTTPAdapter, Retry
8
+ from typing_extensions import override
9
+
10
+ from fsspec import AbstractFileSystem
11
+ from fsspec.spec import AbstractBufferedFile
12
+
13
+
14
class DatabricksException(Exception):
    """Error reported by the DBFS REST API.

    Carries the API ``error_code`` alongside the human-readable message and
    any extra detail payload returned by the server.
    """

    def __init__(self, error_code, message, details=None):
        """Create a new DatabricksException"""
        super().__init__(message)
        self.error_code, self.message, self.details = error_code, message, details
26
+
27
+
28
class DatabricksFileSystem(AbstractFileSystem):
    """
    Get access to the Databricks filesystem implementation over HTTP.
    Can be used inside and outside of a databricks cluster.
    """

    def __init__(self, instance, token, **kwargs):
        """
        Create a new DatabricksFileSystem.

        Parameters
        ----------
        instance: str
            The instance URL of the databricks cluster.
            For example for an Azure databricks cluster, this
            has the form adb-<some-number>.<two digits>.azuredatabricks.net.
        token: str
            Your personal token. Find out more
            here: https://docs.databricks.com/dev-tools/api/latest/authentication.html
        """
        self.instance = instance
        self.token = token
        self.session = requests.Session()
        # retry transient server/throttling errors with a small backoff
        self.retries = Retry(
            total=10,
            backoff_factor=0.05,
            status_forcelist=[408, 429, 500, 502, 503, 504],
        )

        self.session.mount("https://", HTTPAdapter(max_retries=self.retries))
        self.session.headers.update({"Authorization": f"Bearer {self.token}"})

        super().__init__(**kwargs)

    @override
    def _ls_from_cache(self, path) -> list[dict[str, str | int]] | None:
        """Check cache for listing

        Returns listing, if found (may be empty list for a directory that
        exists but contains nothing), None if not in cache.
        """
        # drop any cached listing for the path itself; only the parent's
        # cached entry is consulted below
        self.dircache.pop(path.rstrip("/"), None)

        parent = self._parent(path)
        if parent in self.dircache:
            for entry in self.dircache[parent]:
                if entry["name"] == path.rstrip("/"):
                    if entry["type"] != "directory":
                        return [entry]
                    return []
            raise FileNotFoundError(path)

    def ls(self, path, detail=True, **kwargs):
        """
        List the contents of the given path.

        Parameters
        ----------
        path: str
            Absolute path
        detail: bool
            Return not only the list of filenames,
            but also additional information on file sizes
            and types.
        """
        try:
            out = self._ls_from_cache(path)
        except FileNotFoundError:
            # This happens if the `path`'s parent was cached, but `path` is not
            # there. This suggests that `path` is new since the parent was
            # cached. Attempt to invalidate parent's cache before continuing.
            self.dircache.pop(self._parent(path), None)
            out = None

        if not out:
            try:
                r = self._send_to_api(
                    method="get", endpoint="list", json={"path": path}
                )
            except DatabricksException as e:
                if e.error_code == "RESOURCE_DOES_NOT_EXIST":
                    raise FileNotFoundError(e.message) from e

                raise
            files = r.get("files", [])
            out = [
                {
                    "name": o["path"],
                    "type": "directory" if o["is_dir"] else "file",
                    "size": o["file_size"],
                }
                for o in files
            ]
            self.dircache[path] = out

        if detail:
            return out
        return [o["name"] for o in out]

    def makedirs(self, path, exist_ok=True):
        """
        Create a given absolute path and all of its parents.

        Parameters
        ----------
        path: str
            Absolute path to create
        exist_ok: bool
            If false, checks if the folder
            exists before creating it (and raises an
            Exception if this is the case)
        """
        if not exist_ok:
            try:
                # If the following succeeds, the path is already present
                self._send_to_api(
                    method="get", endpoint="get-status", json={"path": path}
                )
                raise FileExistsError(f"Path {path} already exists")
            except DatabricksException as e:
                # NOTE(review): error codes other than RESOURCE_DOES_NOT_EXIST
                # are silently swallowed here (no re-raise, unlike rm/mv) --
                # confirm whether this best-effort probe is intentional
                if e.error_code == "RESOURCE_DOES_NOT_EXIST":
                    pass

        try:
            self._send_to_api(method="post", endpoint="mkdirs", json={"path": path})
        except DatabricksException as e:
            if e.error_code == "RESOURCE_ALREADY_EXISTS":
                raise FileExistsError(e.message) from e

            raise
        self.invalidate_cache(self._parent(path))

    def mkdir(self, path, create_parents=True, **kwargs):
        """
        Create a given absolute path and all of its parents.

        Parameters
        ----------
        path: str
            Absolute path to create
        create_parents: bool
            Whether to create all parents or not.
            "False" is not implemented so far.
        """
        if not create_parents:
            raise NotImplementedError

        self.mkdirs(path, **kwargs)

    def rm(self, path, recursive=False, **kwargs):
        """
        Remove the file or folder at the given absolute path.

        Parameters
        ----------
        path: str
            Absolute path what to remove
        recursive: bool
            Recursively delete all files in a folder.
        """
        try:
            self._send_to_api(
                method="post",
                endpoint="delete",
                json={"path": path, "recursive": recursive},
            )
        except DatabricksException as e:
            # This is not really an exception, it just means
            # not everything was deleted so far
            if e.error_code == "PARTIAL_DELETE":
                # retry until the API reports a complete delete
                self.rm(path=path, recursive=recursive)
            elif e.error_code == "IO_ERROR":
                # Using the same exception as the os module would use here
                raise OSError(e.message) from e

            raise
        self.invalidate_cache(self._parent(path))

    def mv(
        self, source_path, destination_path, recursive=False, maxdepth=None, **kwargs
    ):
        """
        Move a source to a destination path.

        A note from the original [databricks API manual]
        (https://docs.databricks.com/dev-tools/api/latest/dbfs.html#move).

        When moving a large number of files the API call will time out after
        approximately 60s, potentially resulting in partially moved data.
        Therefore, for operations that move more than 10k files, we strongly
        discourage using the DBFS REST API.

        Parameters
        ----------
        source_path: str
            From where to move (absolute path)
        destination_path: str
            To where to move (absolute path)
        recursive: bool
            Not implemented so far.
        maxdepth:
            Not implemented so far.
        """
        if recursive:
            raise NotImplementedError
        if maxdepth:
            raise NotImplementedError

        try:
            self._send_to_api(
                method="post",
                endpoint="move",
                json={"source_path": source_path, "destination_path": destination_path},
            )
        except DatabricksException as e:
            if e.error_code == "RESOURCE_DOES_NOT_EXIST":
                raise FileNotFoundError(e.message) from e
            elif e.error_code == "RESOURCE_ALREADY_EXISTS":
                raise FileExistsError(e.message) from e

            raise
        self.invalidate_cache(self._parent(source_path))
        self.invalidate_cache(self._parent(destination_path))

    def _open(self, path, mode="rb", block_size="default", **kwargs):
        """
        Overwrite the base class method to make sure to create a DBFile.
        All arguments are copied from the base method.

        Only the default blocksize is allowed.
        """
        return DatabricksFile(self, path, mode=mode, block_size=block_size, **kwargs)

    def _send_to_api(self, method, endpoint, json):
        """
        Send the given json to the DBFS API
        using a get or post request (specified by the argument `method`).

        Parameters
        ----------
        method: str
            Which http method to use for communication; "get" or "post".
        endpoint: str
            Where to send the request to (last part of the API URL)
        json: dict
            Dictionary of information to send
        """
        if method == "post":
            session_call = self.session.post
        elif method == "get":
            session_call = self.session.get
        else:
            raise ValueError(f"Do not understand method {method}")

        url = urllib.parse.urljoin(f"https://{self.instance}/api/2.0/dbfs/", endpoint)

        r = session_call(url, json=json)

        # The DBFS API will return a json, also in case of an exception.
        # We want to preserve this information as well as possible.
        try:
            r.raise_for_status()
        except requests.HTTPError as e:
            # try to extract json error message
            # if that fails, fall back to the original exception
            try:
                exception_json = e.response.json()
            except Exception:
                raise e from None

            raise DatabricksException(**exception_json) from e

        return r.json()

    def _create_handle(self, path, overwrite=True):
        """
        Internal function to create a handle, which can be used to
        write blocks of a file to DBFS.
        A handle has a unique identifier which needs to be passed
        whenever written during this transaction.
        The handle is active for 10 minutes - after that a new
        write transaction needs to be created.
        Make sure to close the handle after you are finished.

        Parameters
        ----------
        path: str
            Absolute path for this file.
        overwrite: bool
            If a file already exist at this location, either overwrite
            it or raise an exception.
        """
        try:
            r = self._send_to_api(
                method="post",
                endpoint="create",
                json={"path": path, "overwrite": overwrite},
            )
            return r["handle"]
        except DatabricksException as e:
            if e.error_code == "RESOURCE_ALREADY_EXISTS":
                raise FileExistsError(e.message) from e

            raise

    def _close_handle(self, handle):
        """
        Close a handle, which was opened by :func:`_create_handle`.

        Parameters
        ----------
        handle: str
            Which handle to close.
        """
        try:
            self._send_to_api(method="post", endpoint="close", json={"handle": handle})
        except DatabricksException as e:
            if e.error_code == "RESOURCE_DOES_NOT_EXIST":
                raise FileNotFoundError(e.message) from e

            raise

    def _add_data(self, handle, data):
        """
        Upload data to an already opened file handle
        (opened by :func:`_create_handle`).
        The maximal allowed data size is 1MB after
        conversion to base64.
        Remember to close the handle when you are finished.

        Parameters
        ----------
        handle: str
            Which handle to upload data to.
        data: bytes
            Block of data to add to the handle.
        """
        data = base64.b64encode(data).decode()
        try:
            self._send_to_api(
                method="post",
                endpoint="add-block",
                json={"handle": handle, "data": data},
            )
        except DatabricksException as e:
            if e.error_code == "RESOURCE_DOES_NOT_EXIST":
                raise FileNotFoundError(e.message) from e
            elif e.error_code == "MAX_BLOCK_SIZE_EXCEEDED":
                raise ValueError(e.message) from e

            raise

    def _get_data(self, path, start, end):
        """
        Download data in bytes from a given absolute path in a block
        from [start, start+length].
        The maximum number of allowed bytes to read is 1MB.

        Parameters
        ----------
        path: str
            Absolute path to download data from
        start: int
            Start position of the block
        end: int
            End position of the block
        """
        try:
            r = self._send_to_api(
                method="get",
                endpoint="read",
                json={"path": path, "offset": start, "length": end - start},
            )
            return base64.b64decode(r["data"])
        except DatabricksException as e:
            if e.error_code == "RESOURCE_DOES_NOT_EXIST":
                raise FileNotFoundError(e.message) from e
            elif e.error_code in ["INVALID_PARAMETER_VALUE", "MAX_READ_SIZE_EXCEEDED"]:
                raise ValueError(e.message) from e

            raise

    def invalidate_cache(self, path=None):
        """Drop the cached listing for *path* (or all listings when None)."""
        if path is None:
            self.dircache.clear()
        else:
            self.dircache.pop(path, None)
        super().invalidate_cache(path)
416
+
417
+
418
class DatabricksFile(AbstractBufferedFile):
    """
    Helper class for files referenced in the DatabricksFileSystem.
    """

    DEFAULT_BLOCK_SIZE = 1 * 2**20  # only allowed block size (DBFS API 1MB limit)

    def __init__(
        self,
        fs,
        path,
        mode="rb",
        block_size="default",
        autocommit=True,
        cache_type="readahead",
        cache_options=None,
        **kwargs,
    ):
        """
        Create a new instance of the DatabricksFile.

        The blocksize needs to be the default one.
        """
        if block_size is None or block_size == "default":
            block_size = self.DEFAULT_BLOCK_SIZE

        assert block_size == self.DEFAULT_BLOCK_SIZE, (
            f"Only the default block size is allowed, not {block_size}"
        )

        super().__init__(
            fs,
            path,
            mode=mode,
            block_size=block_size,
            autocommit=autocommit,
            cache_type=cache_type,
            cache_options=cache_options or {},
            **kwargs,
        )

    def _initiate_upload(self):
        """Internal function to start a file upload"""
        self.handle = self.fs._create_handle(self.path)

    def _upload_chunk(self, final=False):
        """Internal function to add a chunk of data to a started upload"""
        self.buffer.seek(0)
        data = self.buffer.getvalue()

        # the API caps each add-block call at 1MB, so split the buffer
        data_chunks = [
            data[start:end] for start, end in self._to_sized_blocks(len(data))
        ]

        for data_chunk in data_chunks:
            self.fs._add_data(handle=self.handle, data=data_chunk)

        if final:
            self.fs._close_handle(handle=self.handle)
        return True

    def _fetch_range(self, start, end):
        """Internal function to download a block of data"""
        return_buffer = b""
        length = end - start
        # reads are likewise capped at 1MB per API call
        for chunk_start, chunk_end in self._to_sized_blocks(length, start):
            return_buffer += self.fs._get_data(
                path=self.path, start=chunk_start, end=chunk_end
            )

        return return_buffer

    def _to_sized_blocks(self, length, start=0):
        """Helper function to split a range from 0 to total_length into blocksizes"""
        end = start + length
        for data_chunk in range(start, end, self.blocksize):
            data_start = data_chunk
            data_end = min(end, data_chunk + self.blocksize)
            yield data_start, data_end
+ yield data_start, data_end
tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/dirfs.py ADDED
@@ -0,0 +1,388 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .. import filesystem
2
+ from ..asyn import AsyncFileSystem
3
+
4
+
5
class DirFileSystem(AsyncFileSystem):
    """Directory prefix filesystem

    The DirFileSystem is a filesystem-wrapper. It assumes every path it is dealing with
    is relative to the `path`. After performing the necessary paths operation it
    delegates everything to the wrapped filesystem.
    """

    protocol = "dir"

    def __init__(
        self,
        path=None,
        fs=None,
        fo=None,
        target_protocol=None,
        target_options=None,
        **storage_options,
    ):
        """
        Parameters
        ----------
        path: str
            Path to the directory.
        fs: AbstractFileSystem
            An instantiated filesystem to wrap.
        target_protocol, target_options:
            if fs is none, construct it from these
        fo: str
            Alternate for path; do not provide both
        """
        super().__init__(**storage_options)
        if fs is None:
            fs = filesystem(protocol=target_protocol, **(target_options or {}))
        path = path or fo

        # The wrapper and the wrapped fs must agree on sync/async mode, since
        # every call below is delegated one-to-one.
        if self.asynchronous and not fs.async_impl:
            raise ValueError("can't use asynchronous with non-async fs")

        if fs.async_impl and self.asynchronous != fs.asynchronous:
            raise ValueError("both dirfs and fs should be in the same sync/async mode")

        self.path = fs._strip_protocol(path)
        self.fs = fs

    def _join(self, path):
        """Prefix ``path`` (str, or list/dict of str) with the base directory."""
        if isinstance(path, str):
            if not self.path:
                return path
            if not path:
                return self.path
            return self.fs.sep.join((self.path, self._strip_protocol(path)))
        if isinstance(path, dict):
            # mapping input: rewrite only the keys, keep the values
            return {self._join(_path): value for _path, value in path.items()}
        return [self._join(_path) for _path in path]

    def _relpath(self, path):
        """Inverse of ``_join``: strip the base directory from an inner-fs path."""
        if isinstance(path, str):
            if not self.path:
                return path
            # We need to account for S3FileSystem returning paths that do not
            # start with a '/'
            if path == self.path or (
                self.path.startswith(self.fs.sep) and path == self.path[1:]
            ):
                return ""
            prefix = self.path + self.fs.sep
            if self.path.startswith(self.fs.sep) and not path.startswith(self.fs.sep):
                prefix = prefix[1:]
            assert path.startswith(prefix)
            return path[len(prefix) :]
        return [self._relpath(_path) for _path in path]

    # Wrappers below: each method joins incoming path(s) onto the base
    # directory, delegates to the wrapped filesystem, and — where paths are
    # returned — maps results back through _relpath.

    @property
    def sep(self):
        return self.fs.sep

    async def set_session(self, *args, **kwargs):
        return await self.fs.set_session(*args, **kwargs)

    async def _rm_file(self, path, **kwargs):
        return await self.fs._rm_file(self._join(path), **kwargs)

    def rm_file(self, path, **kwargs):
        return self.fs.rm_file(self._join(path), **kwargs)

    async def _rm(self, path, *args, **kwargs):
        return await self.fs._rm(self._join(path), *args, **kwargs)

    def rm(self, path, *args, **kwargs):
        return self.fs.rm(self._join(path), *args, **kwargs)

    async def _cp_file(self, path1, path2, **kwargs):
        return await self.fs._cp_file(self._join(path1), self._join(path2), **kwargs)

    def cp_file(self, path1, path2, **kwargs):
        return self.fs.cp_file(self._join(path1), self._join(path2), **kwargs)

    async def _copy(
        self,
        path1,
        path2,
        *args,
        **kwargs,
    ):
        return await self.fs._copy(
            self._join(path1),
            self._join(path2),
            *args,
            **kwargs,
        )

    def copy(self, path1, path2, *args, **kwargs):
        return self.fs.copy(
            self._join(path1),
            self._join(path2),
            *args,
            **kwargs,
        )

    async def _pipe(self, path, *args, **kwargs):
        return await self.fs._pipe(self._join(path), *args, **kwargs)

    def pipe(self, path, *args, **kwargs):
        return self.fs.pipe(self._join(path), *args, **kwargs)

    async def _pipe_file(self, path, *args, **kwargs):
        return await self.fs._pipe_file(self._join(path), *args, **kwargs)

    def pipe_file(self, path, *args, **kwargs):
        return self.fs.pipe_file(self._join(path), *args, **kwargs)

    async def _cat_file(self, path, *args, **kwargs):
        return await self.fs._cat_file(self._join(path), *args, **kwargs)

    def cat_file(self, path, *args, **kwargs):
        return self.fs.cat_file(self._join(path), *args, **kwargs)

    async def _cat(self, path, *args, **kwargs):
        ret = await self.fs._cat(
            self._join(path),
            *args,
            **kwargs,
        )

        if isinstance(ret, dict):
            # multiple files were read: keys are paths, so make them relative
            return {self._relpath(key): value for key, value in ret.items()}

        return ret

    def cat(self, path, *args, **kwargs):
        ret = self.fs.cat(
            self._join(path),
            *args,
            **kwargs,
        )

        if isinstance(ret, dict):
            # multiple files were read: keys are paths, so make them relative
            return {self._relpath(key): value for key, value in ret.items()}

        return ret

    async def _put_file(self, lpath, rpath, **kwargs):
        # only the remote path is prefixed; lpath is local
        return await self.fs._put_file(lpath, self._join(rpath), **kwargs)

    def put_file(self, lpath, rpath, **kwargs):
        return self.fs.put_file(lpath, self._join(rpath), **kwargs)

    async def _put(
        self,
        lpath,
        rpath,
        *args,
        **kwargs,
    ):
        return await self.fs._put(
            lpath,
            self._join(rpath),
            *args,
            **kwargs,
        )

    def put(self, lpath, rpath, *args, **kwargs):
        return self.fs.put(
            lpath,
            self._join(rpath),
            *args,
            **kwargs,
        )

    async def _get_file(self, rpath, lpath, **kwargs):
        return await self.fs._get_file(self._join(rpath), lpath, **kwargs)

    def get_file(self, rpath, lpath, **kwargs):
        return self.fs.get_file(self._join(rpath), lpath, **kwargs)

    async def _get(self, rpath, *args, **kwargs):
        return await self.fs._get(self._join(rpath), *args, **kwargs)

    def get(self, rpath, *args, **kwargs):
        return self.fs.get(self._join(rpath), *args, **kwargs)

    async def _isfile(self, path):
        return await self.fs._isfile(self._join(path))

    def isfile(self, path):
        return self.fs.isfile(self._join(path))

    async def _isdir(self, path):
        return await self.fs._isdir(self._join(path))

    def isdir(self, path):
        return self.fs.isdir(self._join(path))

    async def _size(self, path):
        return await self.fs._size(self._join(path))

    def size(self, path):
        return self.fs.size(self._join(path))

    async def _exists(self, path):
        return await self.fs._exists(self._join(path))

    def exists(self, path):
        return self.fs.exists(self._join(path))

    async def _info(self, path, **kwargs):
        info = await self.fs._info(self._join(path), **kwargs)
        # copy before mutating: the wrapped fs may be serving this dict
        # from its own cache
        info = info.copy()
        info["name"] = self._relpath(info["name"])
        return info

    def info(self, path, **kwargs):
        info = self.fs.info(self._join(path), **kwargs)
        # copy before mutating: the wrapped fs may be serving this dict
        # from its own cache
        info = info.copy()
        info["name"] = self._relpath(info["name"])
        return info

    async def _ls(self, path, detail=True, **kwargs):
        ret = (await self.fs._ls(self._join(path), detail=detail, **kwargs)).copy()
        if detail:
            out = []
            for entry in ret:
                entry = entry.copy()
                entry["name"] = self._relpath(entry["name"])
                out.append(entry)
            return out

        return self._relpath(ret)

    def ls(self, path, detail=True, **kwargs):
        ret = self.fs.ls(self._join(path), detail=detail, **kwargs).copy()
        if detail:
            out = []
            for entry in ret:
                entry = entry.copy()
                entry["name"] = self._relpath(entry["name"])
                out.append(entry)
            return out

        return self._relpath(ret)

    async def _walk(self, path, *args, **kwargs):
        # only the root path of each triple needs translating back
        async for root, dirs, files in self.fs._walk(self._join(path), *args, **kwargs):
            yield self._relpath(root), dirs, files

    def walk(self, path, *args, **kwargs):
        for root, dirs, files in self.fs.walk(self._join(path), *args, **kwargs):
            yield self._relpath(root), dirs, files

    async def _glob(self, path, **kwargs):
        detail = kwargs.get("detail", False)
        ret = await self.fs._glob(self._join(path), **kwargs)
        if detail:
            return {self._relpath(path): info for path, info in ret.items()}
        return self._relpath(ret)

    def glob(self, path, **kwargs):
        detail = kwargs.get("detail", False)
        ret = self.fs.glob(self._join(path), **kwargs)
        if detail:
            return {self._relpath(path): info for path, info in ret.items()}
        return self._relpath(ret)

    async def _du(self, path, *args, **kwargs):
        total = kwargs.get("total", True)
        ret = await self.fs._du(self._join(path), *args, **kwargs)
        if total:
            # a single aggregate number; no paths to rewrite
            return ret

        return {self._relpath(path): size for path, size in ret.items()}

    def du(self, path, *args, **kwargs):
        total = kwargs.get("total", True)
        ret = self.fs.du(self._join(path), *args, **kwargs)
        if total:
            # a single aggregate number; no paths to rewrite
            return ret

        return {self._relpath(path): size for path, size in ret.items()}

    async def _find(self, path, *args, **kwargs):
        detail = kwargs.get("detail", False)
        ret = await self.fs._find(self._join(path), *args, **kwargs)
        if detail:
            return {self._relpath(path): info for path, info in ret.items()}
        return self._relpath(ret)

    def find(self, path, *args, **kwargs):
        detail = kwargs.get("detail", False)
        ret = self.fs.find(self._join(path), *args, **kwargs)
        if detail:
            return {self._relpath(path): info for path, info in ret.items()}
        return self._relpath(ret)

    async def _expand_path(self, path, *args, **kwargs):
        return self._relpath(
            await self.fs._expand_path(self._join(path), *args, **kwargs)
        )

    def expand_path(self, path, *args, **kwargs):
        return self._relpath(self.fs.expand_path(self._join(path), *args, **kwargs))

    async def _mkdir(self, path, *args, **kwargs):
        return await self.fs._mkdir(self._join(path), *args, **kwargs)

    def mkdir(self, path, *args, **kwargs):
        return self.fs.mkdir(self._join(path), *args, **kwargs)

    async def _makedirs(self, path, *args, **kwargs):
        return await self.fs._makedirs(self._join(path), *args, **kwargs)

    def makedirs(self, path, *args, **kwargs):
        return self.fs.makedirs(self._join(path), *args, **kwargs)

    def rmdir(self, path):
        return self.fs.rmdir(self._join(path))

    def mv(self, path1, path2, **kwargs):
        return self.fs.mv(
            self._join(path1),
            self._join(path2),
            **kwargs,
        )

    def touch(self, path, **kwargs):
        return self.fs.touch(self._join(path), **kwargs)

    def created(self, path):
        return self.fs.created(self._join(path))

    def modified(self, path):
        return self.fs.modified(self._join(path))

    def sign(self, path, *args, **kwargs):
        return self.fs.sign(self._join(path), *args, **kwargs)

    def __repr__(self):
        return f"{self.__class__.__qualname__}(path='{self.path}', fs={self.fs})"

    def open(
        self,
        path,
        *args,
        **kwargs,
    ):
        return self.fs.open(
            self._join(path),
            *args,
            **kwargs,
        )

    async def open_async(
        self,
        path,
        *args,
        **kwargs,
    ):
        return await self.fs.open_async(
            self._join(path),
            *args,
            **kwargs,
        )
tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/ftp.py ADDED
@@ -0,0 +1,387 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import uuid
3
+ from ftplib import FTP, FTP_TLS, Error, error_perm
4
+ from typing import Any
5
+
6
+ from ..spec import AbstractBufferedFile, AbstractFileSystem
7
+ from ..utils import infer_storage_options, isfilelike
8
+
9
+
10
class FTPFileSystem(AbstractFileSystem):
    """A filesystem over classic FTP"""

    root_marker = "/"
    # Not cachable: each instance owns a live, stateful FTP connection.
    cachable = False
    protocol = "ftp"

    def __init__(
        self,
        host,
        port=21,
        username=None,
        password=None,
        acct=None,
        block_size=None,
        tempdir=None,
        timeout=30,
        encoding="utf-8",
        tls=False,
        **kwargs,
    ):
        """
        You can use _get_kwargs_from_urls to get some kwargs from
        a reasonable FTP url.

        Authentication will be anonymous if username/password are not
        given.

        Parameters
        ----------
        host: str
            The remote server name/ip to connect to
        port: int
            Port to connect with
        username: str or None
            If authenticating, the user's identifier
        password: str of None
            User's password on the server, if using
        acct: str or None
            Some servers also need an "account" string for auth
        block_size: int or None
            If given, the read-ahead or write buffer size.
        tempdir: str
            Directory on remote to put temporary files when in a transaction
        timeout: int
            Timeout of the ftp connection in seconds
        encoding: str
            Encoding to use for directories and filenames in FTP connection
        tls: bool
            Use FTP-TLS, by default False
        """
        super().__init__(**kwargs)
        self.host = host
        self.port = port
        self.tempdir = tempdir or "/tmp"
        # empty strings produce an anonymous login in ftplib
        self.cred = username or "", password or "", acct or ""
        self.timeout = timeout
        self.encoding = encoding
        if block_size is not None:
            self.blocksize = block_size
        else:
            self.blocksize = 2**16
        self.tls = tls
        self._connect()
        if self.tls:
            # switch the data connection to protected (encrypted) mode
            self.ftp.prot_p()

    def _connect(self):
        """(Re)establish the FTP control connection and log in."""
        if self.tls:
            ftp_cls = FTP_TLS
        else:
            ftp_cls = FTP
        self.ftp = ftp_cls(timeout=self.timeout, encoding=self.encoding)
        self.ftp.connect(self.host, self.port)
        self.ftp.login(*self.cred)

    @classmethod
    def _strip_protocol(cls, path):
        # keep only the path component, normalised to "/..." with no
        # trailing slash
        return "/" + infer_storage_options(path)["path"].lstrip("/").rstrip("/")

    @staticmethod
    def _get_kwargs_from_urls(urlpath):
        # host/port/username/password parsed from the URL; path and protocol
        # are handled elsewhere
        out = infer_storage_options(urlpath)
        out.pop("path", None)
        out.pop("protocol", None)
        return out

    def ls(self, path, detail=True, **kwargs):
        """List ``path``, preferring MLSD with a fallback to parsed ``dir`` output."""
        path = self._strip_protocol(path)
        out = []
        if path not in self.dircache:
            try:
                try:
                    out = [
                        (fn, details)
                        for (fn, details) in self.ftp.mlsd(path)
                        if fn not in [".", ".."]
                        and details["type"] not in ["pdir", "cdir"]
                    ]
                except error_perm:
                    # server does not support MLSD
                    out = _mlsd2(self.ftp, path)  # Not platform independent
                for fn, details in out:
                    details["name"] = "/".join(
                        ["" if path == "/" else path, fn.lstrip("/")]
                    )
                    if details["type"] == "file":
                        details["size"] = int(details["size"])
                    else:
                        details["size"] = 0
                    if details["type"] == "dir":
                        details["type"] = "directory"
                self.dircache[path] = out
            except Error:
                # listing failed: path may be a file rather than a directory
                try:
                    info = self.info(path)
                    if info["type"] == "file":
                        out = [(path, info)]
                except (Error, IndexError) as exc:
                    raise FileNotFoundError(path) from exc
        files = self.dircache.get(path, out)
        if not detail:
            return sorted([fn for fn, details in files])
        return [details for fn, details in files]

    def info(self, path, **kwargs):
        # implement with direct method
        path = self._strip_protocol(path)
        if path == "/":
            # special case, since this dir has no real entry
            return {"name": "/", "size": 0, "type": "directory"}
        # look the entry up in its parent's listing
        files = self.ls(self._parent(path).lstrip("/"), True)
        try:
            out = next(f for f in files if f["name"] == path)
        except StopIteration as exc:
            raise FileNotFoundError(path) from exc
        return out

    def get_file(self, rpath, lpath, **kwargs):
        """Download ``rpath`` to local ``lpath`` (path string or file-like object)."""
        if self.isdir(rpath):
            # mirror the directory locally; nothing to transfer
            if not os.path.exists(lpath):
                os.mkdir(lpath)
            return
        if isfilelike(lpath):
            outfile = lpath
        else:
            outfile = open(lpath, "wb")

        def cb(x):
            outfile.write(x)

        self.ftp.retrbinary(
            f"RETR {rpath}",
            blocksize=self.blocksize,
            callback=cb,
        )
        if not isfilelike(lpath):
            outfile.close()

    def cat_file(self, path, start=None, end=None, **kwargs):
        """Return file contents; a bounded read (``end`` given) uses the generic path.

        FTP natively supports only a start offset (REST), so ranges with an
        upper bound are delegated to the base-class implementation.
        """
        if end is not None:
            return super().cat_file(path, start, end, **kwargs)
        out = []

        def cb(x):
            out.append(x)

        try:
            self.ftp.retrbinary(
                f"RETR {path}",
                blocksize=self.blocksize,
                rest=start,
                callback=cb,
            )
        except (Error, error_perm) as orig_exc:
            raise FileNotFoundError(path) from orig_exc
        return b"".join(out)

    def _open(
        self,
        path,
        mode="rb",
        block_size=None,
        cache_options=None,
        autocommit=True,
        **kwargs,
    ):
        # buffered file object; tempdir enables transactional (non-autocommit)
        # writes via a temporary remote path
        path = self._strip_protocol(path)
        block_size = block_size or self.blocksize
        return FTPFile(
            self,
            path,
            mode=mode,
            block_size=block_size,
            tempdir=self.tempdir,
            autocommit=autocommit,
            cache_options=cache_options,
        )

    def _rm(self, path):
        path = self._strip_protocol(path)
        self.ftp.delete(path)
        self.invalidate_cache(self._parent(path))

    def rm(self, path, recursive=False, maxdepth=None):
        # delete deepest entries first so directories are empty when removed
        paths = self.expand_path(path, recursive=recursive, maxdepth=maxdepth)
        for p in reversed(paths):
            if self.isfile(p):
                self.rm_file(p)
            else:
                self.rmdir(p)

    def mkdir(self, path: str, create_parents: bool = True, **kwargs: Any) -> None:
        path = self._strip_protocol(path)
        parent = self._parent(path)
        # recursively create missing ancestors before the leaf directory
        if parent != self.root_marker and not self.exists(parent) and create_parents:
            self.mkdir(parent, create_parents=create_parents)

        self.ftp.mkd(path)
        self.invalidate_cache(self._parent(path))

    def makedirs(self, path: str, exist_ok: bool = False) -> None:
        path = self._strip_protocol(path)
        if self.exists(path):
            # NB: "/" does not "exist" as it has no directory entry
            if not exist_ok:
                raise FileExistsError(f"{path} exists without `exist_ok`")
            # exists_ok=True -> no-op
        else:
            self.mkdir(path, create_parents=True)

    def rmdir(self, path):
        path = self._strip_protocol(path)
        self.ftp.rmd(path)
        self.invalidate_cache(self._parent(path))

    def mv(self, path1, path2, **kwargs):
        path1 = self._strip_protocol(path1)
        path2 = self._strip_protocol(path2)
        self.ftp.rename(path1, path2)
        self.invalidate_cache(self._parent(path1))
        self.invalidate_cache(self._parent(path2))

    def __del__(self):
        # NOTE(review): if _connect() raised during __init__, self.ftp was
        # never assigned and this raises AttributeError at teardown — verify.
        self.ftp.close()

    def invalidate_cache(self, path=None):
        if path is None:
            self.dircache.clear()
        else:
            self.dircache.pop(path, None)
        super().invalidate_cache(path)
261
+
262
+
263
class TransferDone(Exception):
    """Internal exception raised from a retrieval callback to break out of
    an FTP transfer once enough bytes have been received."""
267
+
268
+
269
class FTPFile(AbstractBufferedFile):
    """Interact with a remote FTP file with read/write buffering"""

    def __init__(
        self,
        fs,
        path,
        mode="rb",
        block_size="default",
        autocommit=True,
        cache_type="readahead",
        cache_options=None,
        **kwargs,
    ):
        super().__init__(
            fs,
            path,
            mode=mode,
            block_size=block_size,
            autocommit=autocommit,
            cache_type=cache_type,
            cache_options=cache_options,
            **kwargs,
        )
        if not autocommit:
            # transactional write: stream to a unique temporary remote path;
            # commit() moves it to the real target, discard() deletes it
            self.target = self.path
            self.path = "/".join([kwargs["tempdir"], str(uuid.uuid4())])

    def commit(self):
        """Finalise a transactional write by moving the temp file into place."""
        self.fs.mv(self.path, self.target)

    def discard(self):
        """Abandon a transactional write, deleting the temporary remote file."""
        self.fs.rm(self.path)

    def _fetch_range(self, start, end):
        """Get bytes between given byte limits

        Implemented by raising an exception in the fetch callback when the
        number of bytes received reaches the requested amount.

        Will fail if the server does not respect the REST command on
        retrieve requests.
        """
        out = []
        total = [0]  # mutable byte counter shared with the callback closure

        def callback(x):
            total[0] += len(x)
            if total[0] > end - start:
                # clip the final chunk to the requested length
                out.append(x[: (end - start) - total[0]])
                if end < self.size:
                    raise TransferDone
            else:
                out.append(x)

            if total[0] == end - start and end < self.size:
                raise TransferDone

        try:
            self.fs.ftp.retrbinary(
                f"RETR {self.path}",
                blocksize=self.blocksize,
                rest=start,
                callback=callback,
            )
        except TransferDone:
            try:
                # stop transfer, we got enough bytes for this block
                self.fs.ftp.abort()
                self.fs.ftp.getmultiline()
            except Error:
                # control connection left in a bad state: reconnect
                self.fs._connect()

        return b"".join(out)

    def _upload_chunk(self, final=False):
        # STOR with REST at self.offset so successive buffer flushes append
        # rather than overwrite
        self.buffer.seek(0)
        self.fs.ftp.storbinary(
            f"STOR {self.path}", self.buffer, blocksize=self.blocksize, rest=self.offset
        )
        return True
350
+
351
+
352
+ def _mlsd2(ftp, path="."):
353
+ """
354
+ Fall back to using `dir` instead of `mlsd` if not supported.
355
+
356
+ This parses a Linux style `ls -l` response to `dir`, but the response may
357
+ be platform dependent.
358
+
359
+ Parameters
360
+ ----------
361
+ ftp: ftplib.FTP
362
+ path: str
363
+ Expects to be given path, but defaults to ".".
364
+ """
365
+ lines = []
366
+ minfo = []
367
+ ftp.dir(path, lines.append)
368
+ for line in lines:
369
+ split_line = line.split()
370
+ if len(split_line) < 9:
371
+ continue
372
+ this = (
373
+ split_line[-1],
374
+ {
375
+ "modify": " ".join(split_line[5:8]),
376
+ "unix.owner": split_line[2],
377
+ "unix.group": split_line[3],
378
+ "unix.mode": split_line[0],
379
+ "size": split_line[4],
380
+ },
381
+ )
382
+ if this[1]["unix.mode"][0] == "d":
383
+ this[1]["type"] = "dir"
384
+ else:
385
+ this[1]["type"] = "file"
386
+ minfo.append(this)
387
+ return minfo
tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/gist.py ADDED
@@ -0,0 +1,232 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import requests
2
+
3
+ from ..spec import AbstractFileSystem
4
+ from ..utils import infer_storage_options
5
+ from .memory import MemoryFile
6
+
7
+
8
class GistFileSystem(AbstractFileSystem):
    """
    Interface to files in a single GitHub Gist.

    Provides read-only access to a gist's files. Gists do not contain
    subdirectories, so file listing is straightforward.

    Parameters
    ----------
    gist_id : str
        The ID of the gist you want to access (the long hex value from the URL).
    filenames : list[str] (optional)
        If provided, only make a file system representing these files, and do not fetch
        the list of all files for this gist.
    sha : str (optional)
        If provided, fetch a particular revision of the gist. If omitted,
        the latest revision is used.
    username : str (optional)
        GitHub username for authentication (required if token is given).
    token : str (optional)
        GitHub personal access token (required if username is given).
    timeout : (float, float) or float, optional
        Connect and read timeouts for requests (default 60s each).
    kwargs : dict
        Stored on `self.request_kw` and passed to `requests.get` when fetching Gist
        metadata or reading ("opening") a file.
    """

    protocol = "gist"
    gist_url = "https://api.github.com/gists/{gist_id}"
    gist_rev_url = "https://api.github.com/gists/{gist_id}/{sha}"

    def __init__(
        self,
        gist_id,
        filenames=None,
        sha=None,
        username=None,
        token=None,
        timeout=None,
        **kwargs,
    ):
        super().__init__()
        self.gist_id = gist_id
        self.filenames = filenames
        self.sha = sha  # revision of the gist (optional)
        if (username is None) ^ (token is None):
            # Both or neither must be set
            # NOTE(review): an empty-string credential paired with None slips
            # past the inner truthiness test without raising — verify intent.
            if username or token:
                raise ValueError("Auth requires both username and token, or neither.")
        self.username = username
        self.token = token
        self.request_kw = kwargs
        # Default timeouts to 60s connect/read if none provided
        self.timeout = timeout if timeout is not None else (60, 60)

        # We use a single-level "directory" cache, because a gist is essentially flat
        self.dircache[""] = self._fetch_file_list()

    @property
    def kw(self):
        """Auth parameters passed to 'requests' if we have username/token."""
        if self.username is not None and self.token is not None:
            return {"auth": (self.username, self.token), **self.request_kw}
        return self.request_kw

    def _fetch_gist_metadata(self):
        """
        Fetch the JSON metadata for this gist (possibly for a specific revision).
        """
        if self.sha:
            url = self.gist_rev_url.format(gist_id=self.gist_id, sha=self.sha)
        else:
            url = self.gist_url.format(gist_id=self.gist_id)

        r = requests.get(url, timeout=self.timeout, **self.kw)
        if r.status_code == 404:
            raise FileNotFoundError(
                f"Gist not found: {self.gist_id}@{self.sha or 'latest'}"
            )
        r.raise_for_status()
        return r.json()

    def _fetch_file_list(self):
        """
        Returns a list of dicts describing each file in the gist. These get stored
        in self.dircache[""].
        """
        meta = self._fetch_gist_metadata()
        if self.filenames:
            # restrict to the requested subset; fail fast on missing names
            available_files = meta.get("files", {})
            files = {}
            for fn in self.filenames:
                if fn not in available_files:
                    raise FileNotFoundError(fn)
                files[fn] = available_files[fn]
        else:
            files = meta.get("files", {})

        out = []
        for fname, finfo in files.items():
            if finfo is None:
                # Occasionally GitHub returns a file entry with null if it was deleted
                continue
            # Build a directory entry
            out.append(
                {
                    "name": fname,  # file's name
                    "type": "file",  # gists have no subdirectories
                    "size": finfo.get("size", 0),  # file size in bytes
                    "raw_url": finfo.get("raw_url"),
                }
            )
        return out

    @classmethod
    def _strip_protocol(cls, path):
        """
        Remove 'gist://' from the path, if present.
        """
        # The default infer_storage_options can handle gist://username:token@id/file
        # or gist://id/file, but let's ensure we handle a normal usage too.
        # We'll just strip the protocol prefix if it exists.
        path = infer_storage_options(path).get("path", path)
        return path.lstrip("/")

    @staticmethod
    def _get_kwargs_from_urls(path):
        """
        Parse 'gist://' style URLs into GistFileSystem constructor kwargs.
        For example:
        gist://:TOKEN@<gist_id>/file.txt
        gist://username:TOKEN@<gist_id>/file.txt
        """
        so = infer_storage_options(path)
        out = {}
        if "username" in so and so["username"]:
            out["username"] = so["username"]
        if "password" in so and so["password"]:
            out["token"] = so["password"]
        if "host" in so and so["host"]:
            # We interpret 'host' as the gist ID
            out["gist_id"] = so["host"]

        # Extract SHA and filename from path
        if "path" in so and so["path"]:
            path_parts = so["path"].rsplit("/", 2)[-2:]
            if len(path_parts) == 2:
                if path_parts[0]:  # SHA present
                    out["sha"] = path_parts[0]
                if path_parts[1]:  # filename also present
                    out["filenames"] = [path_parts[1]]

        return out

    def ls(self, path="", detail=False, **kwargs):
        """
        List files in the gist. Gists are single-level, so any 'path' is basically
        the filename, or empty for all files.

        Parameters
        ----------
        path : str, optional
            The filename to list. If empty, returns all files in the gist.
        detail : bool, default False
            If True, return a list of dicts; if False, return a list of filenames.
        """
        path = self._strip_protocol(path or "")
        # If path is empty, return all
        if path == "":
            results = self.dircache[""]
        else:
            # We want just the single file with this name
            all_files = self.dircache[""]
            results = [f for f in all_files if f["name"] == path]
            if not results:
                raise FileNotFoundError(path)
        if detail:
            return results
        else:
            return sorted(f["name"] for f in results)

    def _open(self, path, mode="rb", block_size=None, **kwargs):
        """
        Read a single file from the gist.
        """
        if mode != "rb":
            raise NotImplementedError("GitHub Gist FS is read-only (no write).")

        path = self._strip_protocol(path)
        # Find the file entry in our dircache
        matches = [f for f in self.dircache[""] if f["name"] == path]
        if not matches:
            raise FileNotFoundError(path)
        finfo = matches[0]

        raw_url = finfo.get("raw_url")
        if not raw_url:
            raise FileNotFoundError(f"No raw_url for file: {path}")

        r = requests.get(raw_url, timeout=self.timeout, **self.kw)
        if r.status_code == 404:
            raise FileNotFoundError(path)
        r.raise_for_status()
        # whole file is fetched eagerly and served from memory
        return MemoryFile(path, None, r.content)

    def cat(self, path, recursive=False, on_error="raise", **kwargs):
        """
        Return {path: contents} for the given file or files. If 'recursive' is True,
        and path is empty, returns all files in the gist.
        """
        paths = self.expand_path(path, recursive=recursive)
        out = {}
        for p in paths:
            try:
                with self.open(p, "rb") as f:
                    out[p] = f.read()
            except FileNotFoundError as e:
                if on_error == "raise":
                    raise e
                elif on_error == "omit":
                    pass  # skip
                else:
                    # any other mode: record the exception as the value
                    out[p] = e
        return out
tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/git.py ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+
3
+ import pygit2
4
+
5
+ from fsspec.spec import AbstractFileSystem
6
+
7
+ from .memory import MemoryFile
8
+
9
+
10
class GitFileSystem(AbstractFileSystem):
    """Browse the files of a local git repo at any hash/tag/branch

    (experimental backend)
    """

    root_marker = ""
    cachable = True

    def __init__(self, path=None, fo=None, ref=None, **kwargs):
        """
        Parameters
        ----------
        path: str (optional)
            Local location of the repo (uses current directory if not given).
            May be deprecated in favour of ``fo``. When used with a higher
            level function such as fsspec.open(), may be of the form
            "git://[path-to-repo[:]][ref@]path/to/file" (but the actual
            file path should not contain "@" or ":").
        fo: str (optional)
            Same as ``path``, but passed as part of a chained URL. This one
            takes precedence if both are given.
        ref: str (optional)
            Reference to work with, could be a hash, tag or branch name. Defaults
            to current working tree. Note that ``ls`` and ``open`` also take hash,
            so this becomes the default for those operations
        kwargs
        """
        super().__init__(**kwargs)
        self.repo = pygit2.Repository(fo or path or os.getcwd())
        self.ref = ref or "master"

    @classmethod
    def _strip_protocol(cls, path):
        # Drop the protocol, then peel off the repo-location ("...:") and
        # ref ("...@") prefixes, leaving only the in-repo file path.
        path = super()._strip_protocol(path).lstrip("/")
        if ":" in path:
            _, path = path.split(":", 1)
        if "@" in path:
            _, path = path.split("@", 1)
        return path.lstrip("/")

    def _path_to_object(self, path, ref):
        """Walk the commit tree for ``ref`` down to the object at ``path``."""
        commit, _ = self.repo.resolve_refish(ref or self.ref)
        obj = commit.tree
        for segment in path.split("/"):
            if segment and isinstance(obj, pygit2.Tree):
                if segment not in obj:
                    raise FileNotFoundError(path)
                obj = obj[segment]
        return obj

    @staticmethod
    def _get_kwargs_from_urls(path):
        # "git://[path-to-repo:][ref@]path/to/file" -> constructor kwargs
        path = path.removeprefix("git://")
        out = {}
        if ":" in path:
            out["path"], path = path.split(":", 1)
        if "@" in path:
            out["ref"], path = path.split("@", 1)
        return out

    @staticmethod
    def _object_to_info(obj, path=None):
        # obj.name and obj.filemode are None for the root tree!
        is_dir = isinstance(obj, pygit2.Tree)
        if path:
            name = "/".join([path, obj.name or ""]).lstrip("/")
        else:
            name = obj.name
        return {
            "type": "directory" if is_dir else "file",
            "name": name,
            "hex": str(obj.id),
            "mode": "100644" if obj.filemode is None else f"{obj.filemode:o}",
            "size": 0 if is_dir else obj.size,
        }

    def ls(self, path, detail=True, ref=None, **kwargs):
        """List the tree (or single blob) found at ``path`` for the given ref."""
        obj = self._path_to_object(self._strip_protocol(path), ref)
        entries = obj if isinstance(obj, pygit2.Tree) else [obj]
        infos = [GitFileSystem._object_to_info(entry, path) for entry in entries]
        return infos if detail else [info["name"] for info in infos]

    def info(self, path, ref=None, **kwargs):
        """Info dict for the single object at ``path``."""
        obj = self._path_to_object(self._strip_protocol(path), ref)
        return GitFileSystem._object_to_info(obj, path)

    def ukey(self, path, ref=None):
        # The git object id uniquely identifies the content at path/ref.
        return self.info(path, ref=ref)["hex"]

    def _open(
        self,
        path,
        mode="rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        ref=None,
        **kwargs,
    ):
        # Blobs are served whole from memory; write modes are not supported.
        obj = self._path_to_object(path, ref or self.ref)
        return MemoryFile(data=obj.data)
tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/github.py ADDED
@@ -0,0 +1,333 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import base64
2
+ import re
3
+
4
+ import requests
5
+
6
+ from ..spec import AbstractFileSystem
7
+ from ..utils import infer_storage_options
8
+ from .memory import MemoryFile
9
+
10
+
11
class GithubFileSystem(AbstractFileSystem):
    """Interface to files in github

    An instance of this class provides the files residing within a remote github
    repository. You may specify a point in the repos history, by SHA, branch
    or tag (default is current master).

    For files less than 1 MB in size, file content is returned directly in a
    MemoryFile. For larger files, or for files tracked by git-lfs, file content
    is returned as an HTTPFile wrapping the ``download_url`` provided by the
    GitHub API.

    When using fsspec.open, allows URIs of the form:

    - "github://path/file", in which case you must specify org, repo and
      may specify sha in the extra args
    - 'github://org:repo@/precip/catalog.yml', where the org and repo are
      part of the URI
    - 'github://org:repo@sha/precip/catalog.yml', where the sha is also included

    ``sha`` can be the full or abbreviated hex of the commit you want to fetch
    from, or a branch or tag name (so long as it doesn't contain special characters
    like "/", "?", which would have to be HTTP-encoded).

    For authorised access, you must provide username and token, which can be made
    at https://github.com/settings/tokens
    """

    # API endpoint templates: tree listing and single-file content retrieval
    url = "https://api.github.com/repos/{org}/{repo}/git/trees/{sha}"
    content_url = "https://api.github.com/repos/{org}/{repo}/contents/{path}?ref={sha}"
    protocol = "github"
    timeout = (60, 60)  # connect, read timeouts

    def __init__(
        self, org, repo, sha=None, username=None, token=None, timeout=None, **kwargs
    ):
        super().__init__(**kwargs)
        self.org = org
        self.repo = repo
        # Credentials must be given together; XOR catches exactly-one-supplied
        if (username is None) ^ (token is None):
            raise ValueError("Auth required both username and token")
        self.username = username
        self.token = token
        if timeout is not None:
            self.timeout = timeout
        if sha is None:
            # look up default branch (not necessarily "master")
            u = "https://api.github.com/repos/{org}/{repo}"
            r = requests.get(
                u.format(org=org, repo=repo), timeout=self.timeout, **self.kw
            )
            r.raise_for_status()
            sha = r.json()["default_branch"]

        self.root = sha
        # Eagerly list the root: validates org/repo/sha and warms the dircache
        self.ls("")
        try:
            from .http import HTTPFileSystem

            self.http_fs = HTTPFileSystem(**kwargs)
        except ImportError:
            # aiohttp stack not installed; >1MB / git-lfs files will raise in _open
            self.http_fs = None

    @property
    def kw(self):
        # requests keyword args carrying HTTP basic auth, when configured
        if self.username:
            return {"auth": (self.username, self.token)}
        return {}

    @classmethod
    def repos(cls, org_or_user, is_org=True):
        """List repo names for given org or user

        This may become the top level of the FS

        Parameters
        ----------
        org_or_user: str
            Name of the github org or user to query
        is_org: bool (default True)
            Whether the name is an organisation (True) or user (False)

        Returns
        -------
        List of string
        """
        r = requests.get(
            f"https://api.github.com/{['users', 'orgs'][is_org]}/{org_or_user}/repos",
            timeout=cls.timeout,
        )
        r.raise_for_status()
        return [repo["name"] for repo in r.json()]

    @property
    def tags(self):
        """Names of tags in the repo"""
        r = requests.get(
            f"https://api.github.com/repos/{self.org}/{self.repo}/tags",
            timeout=self.timeout,
            **self.kw,
        )
        r.raise_for_status()
        return [t["name"] for t in r.json()]

    @property
    def branches(self):
        """Names of branches in the repo"""
        r = requests.get(
            f"https://api.github.com/repos/{self.org}/{self.repo}/branches",
            timeout=self.timeout,
            **self.kw,
        )
        r.raise_for_status()
        return [t["name"] for t in r.json()]

    @property
    def refs(self):
        """Named references, tags and branches"""
        return {"tags": self.tags, "branches": self.branches}

    def ls(self, path, detail=False, sha=None, _sha=None, **kwargs):
        """List files at given path

        Parameters
        ----------
        path: str
            Location to list, relative to repo root
        detail: bool
            If True, returns list of dicts, one per file; if False, returns
            list of full filenames only
        sha: str (optional)
            List at the given point in the repo history, branch or tag name or commit
            SHA
        _sha: str (optional)
            List this specific tree object (used internally to descend into trees)
        """
        path = self._strip_protocol(path)
        if path == "":
            _sha = sha or self.root
        if _sha is None:
            # No tree hash known for this path yet: walk down from the root,
            # resolving each intermediate directory's tree sha one level at a time.
            parts = path.rstrip("/").split("/")
            so_far = ""
            _sha = sha or self.root
            for part in parts:
                out = self.ls(so_far, True, sha=sha, _sha=_sha)
                so_far += "/" + part if so_far else part
                out = [o for o in out if o["name"] == so_far]
                if not out:
                    raise FileNotFoundError(path)
                out = out[0]
                if out["type"] == "file":
                    # "Listing" a file yields only that file's entry
                    if detail:
                        return [out]
                    else:
                        return path
                _sha = out["sha"]
        # Only listings pinned to the instance's root sha are served from /
        # written to the cache; explicit other shas always refetch.
        if path not in self.dircache or sha not in [self.root, None]:
            r = requests.get(
                self.url.format(org=self.org, repo=self.repo, sha=_sha),
                timeout=self.timeout,
                **self.kw,
            )
            if r.status_code == 404:
                raise FileNotFoundError(path)
            r.raise_for_status()
            types = {"blob": "file", "tree": "directory"}
            out = [
                {
                    "name": path + "/" + f["path"] if path else f["path"],
                    "mode": f["mode"],
                    "type": types[f["type"]],
                    "size": f.get("size", 0),
                    "sha": f["sha"],
                }
                for f in r.json()["tree"]
                if f["type"] in types
            ]
            if sha in [self.root, None]:
                self.dircache[path] = out
        else:
            out = self.dircache[path]
        if detail:
            return out
        else:
            return sorted([f["name"] for f in out])

    def invalidate_cache(self, path=None):
        # The path argument is ignored; the whole listing cache is cleared.
        self.dircache.clear()

    @classmethod
    def _strip_protocol(cls, path):
        opts = infer_storage_options(path)
        if "username" not in opts:
            return super()._strip_protocol(path)
        # URL was of the "github://org:repo@sha/..." form: keep only the file path
        return opts["path"].lstrip("/")

    @staticmethod
    def _get_kwargs_from_urls(path):
        # Map "github://org:repo@sha/..." URL parts onto constructor kwargs
        opts = infer_storage_options(path)
        if "username" not in opts:
            return {}
        out = {"org": opts["username"], "repo": opts["password"]}
        if opts["host"]:
            out["sha"] = opts["host"]
        return out

    def _open(
        self,
        path,
        mode="rb",
        block_size=None,
        cache_options=None,
        sha=None,
        **kwargs,
    ):
        """Open one file for reading; small files come back as MemoryFile,
        large or git-lfs files are delegated to HTTPFileSystem."""
        if mode != "rb":
            raise NotImplementedError

        # construct a url to hit the GitHub API's repo contents API
        url = self.content_url.format(
            org=self.org, repo=self.repo, path=path, sha=sha or self.root
        )

        # make a request to this API, and parse the response as JSON
        r = requests.get(url, timeout=self.timeout, **self.kw)
        if r.status_code == 404:
            raise FileNotFoundError(path)
        r.raise_for_status()
        content_json = r.json()

        # if the response's content key is not empty, try to parse it as base64
        if content_json["content"]:
            content = base64.b64decode(content_json["content"])

            # as long as the content does not start with the string
            # "version https://git-lfs.github.com/"
            # then it is probably not a git-lfs pointer and we can just return
            # the content directly
            if not content.startswith(b"version https://git-lfs.github.com/"):
                return MemoryFile(None, None, content)

        # we land here if the content was not present in the first response
        # (regular file over 1MB or git-lfs tracked file)
        # in this case, we get let the HTTPFileSystem handle the download
        if self.http_fs is None:
            raise ImportError(
                "Please install fsspec[http] to access github files >1 MB "
                "or git-lfs tracked files."
            )
        return self.http_fs.open(
            content_json["download_url"],
            mode=mode,
            block_size=block_size,
            cache_options=cache_options,
            **kwargs,
        )

    def rm(self, path, recursive=False, maxdepth=None, message=None):
        # Expand then delete in reverse order so files inside a directory are
        # removed before the directory entry itself is reached.
        path = self.expand_path(path, recursive=recursive, maxdepth=maxdepth)
        for p in reversed(path):
            self.rm_file(p, message=message)

    def rm_file(self, path, message=None, **kwargs):
        """
        Remove a file from a specified branch using a given commit message.

        Since Github DELETE operation requires a branch name, and we can't reliably
        determine whether the provided SHA refers to a branch, tag, or commit, we
        assume it's a branch. If it's not, the user will encounter an error when
        attempting to retrieve the file SHA or delete the file.

        Parameters
        ----------
        path: str
            The file's location relative to the repository root.
        message: str, optional
            The commit message for the deletion.
        """

        if not self.username:
            raise ValueError("Authentication required")

        path = self._strip_protocol(path)

        # Attempt to get SHA from cache or Github API
        sha = self._get_sha_from_cache(path)
        if not sha:
            url = self.content_url.format(
                org=self.org, repo=self.repo, path=path.lstrip("/"), sha=self.root
            )
            r = requests.get(url, timeout=self.timeout, **self.kw)
            if r.status_code == 404:
                raise FileNotFoundError(path)
            r.raise_for_status()
            sha = r.json()["sha"]

        # Delete the file
        delete_url = self.content_url.format(
            org=self.org, repo=self.repo, path=path, sha=self.root
        )
        branch = self.root
        data = {
            "message": message or f"Delete {path}",
            "sha": sha,
            **({"branch": branch} if branch else {}),
        }

        # GitHub reports an unknown branch in the JSON error body; detect it
        # before raise_for_status so we can give an actionable message.
        r = requests.delete(delete_url, json=data, timeout=self.timeout, **self.kw)
        error_message = r.json().get("message", "")
        if re.search(r"Branch .+ not found", error_message):
            error = "Remove only works when the filesystem is initialised from a branch or default (None)"
            raise ValueError(error)
        r.raise_for_status()

        self.invalidate_cache(path)

    def _get_sha_from_cache(self, path):
        # Scan all cached directory listings for an entry matching this path
        for entries in self.dircache.values():
            for entry in entries:
                entry_path = entry.get("name")
                if entry_path and entry_path == path and "sha" in entry:
                    return entry["sha"]
        return None
tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/http.py ADDED
@@ -0,0 +1,890 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import asyncio
2
+ import io
3
+ import logging
4
+ import re
5
+ import weakref
6
+ from copy import copy
7
+ from urllib.parse import urlparse
8
+
9
+ import aiohttp
10
+ import yarl
11
+
12
+ from fsspec.asyn import AbstractAsyncStreamedFile, AsyncFileSystem, sync, sync_wrapper
13
+ from fsspec.callbacks import DEFAULT_CALLBACK
14
+ from fsspec.exceptions import FSTimeoutError
15
+ from fsspec.spec import AbstractBufferedFile
16
+ from fsspec.utils import (
17
+ DEFAULT_BLOCK_SIZE,
18
+ glob_translate,
19
+ isfilelike,
20
+ nullcontext,
21
+ tokenize,
22
+ )
23
+
24
+ from ..caching import AllBytes
25
+
26
# Regexes used by ls() to scrape hyperlinks out of an HTML listing page.
# <a href=...> targets: https://stackoverflow.com/a/15926317/3821154
ex = re.compile(r"""<(a|A)\s+(?:[^>]*?\s+)?(href|HREF)=["'](?P<url>[^"']+)""")
# Bare http(s) URLs anywhere in the text (only used when simple_links=True)
ex2 = re.compile(r"""(?P<url>http[s]?://[-a-zA-Z0-9@:%_+.~#?&/=]+)""")
logger = logging.getLogger("fsspec.http")
30
+
31
+
32
async def get_client(**kwargs):
    # Default session factory; kwargs come from HTTPFileSystem.client_kwargs.
    return aiohttp.ClientSession(**kwargs)
34
+
35
+
36
+ class HTTPFileSystem(AsyncFileSystem):
37
+ """
38
+ Simple File-System for fetching data via HTTP(S)
39
+
40
+ ``ls()`` is implemented by loading the parent page and doing a regex
41
+ match on the result. If simple_link=True, anything of the form
42
+ "http(s)://server.com/stuff?thing=other"; otherwise only links within
43
+ HTML href tags will be used.
44
+ """
45
+
46
+ sep = "/"
47
+
48
+ def __init__(
49
+ self,
50
+ simple_links=True,
51
+ block_size=None,
52
+ same_scheme=True,
53
+ size_policy=None,
54
+ cache_type="bytes",
55
+ cache_options=None,
56
+ asynchronous=False,
57
+ loop=None,
58
+ client_kwargs=None,
59
+ get_client=get_client,
60
+ encoded=False,
61
+ **storage_options,
62
+ ):
63
+ """
64
+ NB: if this is called async, you must await set_client
65
+
66
+ Parameters
67
+ ----------
68
+ block_size: int
69
+ Blocks to read bytes; if 0, will default to raw requests file-like
70
+ objects instead of HTTPFile instances
71
+ simple_links: bool
72
+ If True, will consider both HTML <a> tags and anything that looks
73
+ like a URL; if False, will consider only the former.
74
+ same_scheme: True
75
+ When doing ls/glob, if this is True, only consider paths that have
76
+ http/https matching the input URLs.
77
+ size_policy: this argument is deprecated
78
+ client_kwargs: dict
79
+ Passed to aiohttp.ClientSession, see
80
+ https://docs.aiohttp.org/en/stable/client_reference.html
81
+ For example, ``{'auth': aiohttp.BasicAuth('user', 'pass')}``
82
+ get_client: Callable[..., aiohttp.ClientSession]
83
+ A callable, which takes keyword arguments and constructs
84
+ an aiohttp.ClientSession. Its state will be managed by
85
+ the HTTPFileSystem class.
86
+ storage_options: key-value
87
+ Any other parameters passed on to requests
88
+ cache_type, cache_options: defaults used in open()
89
+ """
90
+ super().__init__(self, asynchronous=asynchronous, loop=loop, **storage_options)
91
+ self.block_size = block_size if block_size is not None else DEFAULT_BLOCK_SIZE
92
+ self.simple_links = simple_links
93
+ self.same_schema = same_scheme
94
+ self.cache_type = cache_type
95
+ self.cache_options = cache_options
96
+ self.client_kwargs = client_kwargs or {}
97
+ self.get_client = get_client
98
+ self.encoded = encoded
99
+ self.kwargs = storage_options
100
+ self._session = None
101
+
102
+ # Clean caching-related parameters from `storage_options`
103
+ # before propagating them as `request_options` through `self.kwargs`.
104
+ # TODO: Maybe rename `self.kwargs` to `self.request_options` to make
105
+ # it clearer.
106
+ request_options = copy(storage_options)
107
+ self.use_listings_cache = request_options.pop("use_listings_cache", False)
108
+ request_options.pop("listings_expiry_time", None)
109
+ request_options.pop("max_paths", None)
110
+ request_options.pop("skip_instance_cache", None)
111
+ self.kwargs = request_options
112
+
113
    @property
    def fsid(self):
        # Static identifier shared by all HTTP filesystem instances.
        return "http"
116
+
117
    def encode_url(self, url):
        # Wrap in yarl.URL; `encoded=True` tells aiohttp the URL is already
        # percent-encoded and must not be re-quoted.
        return yarl.URL(url, encoded=self.encoded)
119
+
120
    @staticmethod
    def close_session(loop, session):
        # Finalizer for the aiohttp session (registered via weakref.finalize).
        # If the event loop is still alive, close cleanly with a short timeout
        # so interpreter shutdown never hangs here.
        if loop is not None and loop.is_running():
            try:
                sync(loop, session.close, timeout=0.1)
                return
            except (TimeoutError, FSTimeoutError, NotImplementedError):
                pass
        connector = getattr(session, "_connector", None)
        if connector is not None:
            # close after loop is dead
            connector._close()
132
+
133
    async def set_session(self):
        """Create (once) and return the aiohttp ClientSession for this instance."""
        if self._session is None:
            self._session = await self.get_client(loop=self.loop, **self.client_kwargs)
            if not self.asynchronous:
                # Sync usage: ensure the session is torn down when this
                # filesystem object is garbage-collected.
                weakref.finalize(self, self.close_session, self.loop, self._session)
        return self._session
139
+
140
    @classmethod
    def _strip_protocol(cls, path):
        """For HTTP, we always want to keep the full URL"""
        return path
144
+
145
+ @classmethod
146
+ def _parent(cls, path):
147
+ # override, since _strip_protocol is different for URLs
148
+ par = super()._parent(path)
149
+ if len(par) > 7: # "http://..."
150
+ return par
151
+ return ""
152
+
153
+ async def _ls_real(self, url, detail=True, **kwargs):
154
+ # ignoring URL-encoded arguments
155
+ kw = self.kwargs.copy()
156
+ kw.update(kwargs)
157
+ logger.debug(url)
158
+ session = await self.set_session()
159
+ async with session.get(self.encode_url(url), **self.kwargs) as r:
160
+ self._raise_not_found_for_status(r, url)
161
+
162
+ if "Content-Type" in r.headers:
163
+ mimetype = r.headers["Content-Type"].partition(";")[0]
164
+ else:
165
+ mimetype = None
166
+
167
+ if mimetype in ("text/html", None):
168
+ try:
169
+ text = await r.text(errors="ignore")
170
+ if self.simple_links:
171
+ links = ex2.findall(text) + [u[2] for u in ex.findall(text)]
172
+ else:
173
+ links = [u[2] for u in ex.findall(text)]
174
+ except UnicodeDecodeError:
175
+ links = [] # binary, not HTML
176
+ else:
177
+ links = []
178
+
179
+ out = set()
180
+ parts = urlparse(url)
181
+ for l in links:
182
+ if isinstance(l, tuple):
183
+ l = l[1]
184
+ if l.startswith("/") and len(l) > 1:
185
+ # absolute URL on this server
186
+ l = f"{parts.scheme}://{parts.netloc}{l}"
187
+ if l.startswith("http"):
188
+ if self.same_schema and l.startswith(url.rstrip("/") + "/"):
189
+ out.add(l)
190
+ elif l.replace("https", "http").startswith(
191
+ url.replace("https", "http").rstrip("/") + "/"
192
+ ):
193
+ # allowed to cross http <-> https
194
+ out.add(l)
195
+ else:
196
+ if l not in ["..", "../"]:
197
+ # Ignore FTP-like "parent"
198
+ out.add("/".join([url.rstrip("/"), l.lstrip("/")]))
199
+ if not out and url.endswith("/"):
200
+ out = await self._ls_real(url.rstrip("/"), detail=False)
201
+ if detail:
202
+ return [
203
+ {
204
+ "name": u,
205
+ "size": None,
206
+ "type": "directory" if u.endswith("/") else "file",
207
+ }
208
+ for u in out
209
+ ]
210
+ else:
211
+ return sorted(out)
212
+
213
+ async def _ls(self, url, detail=True, **kwargs):
214
+ if self.use_listings_cache and url in self.dircache:
215
+ out = self.dircache[url]
216
+ else:
217
+ out = await self._ls_real(url, detail=detail, **kwargs)
218
+ self.dircache[url] = out
219
+ return out
220
+
221
+ ls = sync_wrapper(_ls)
222
+
223
    def _raise_not_found_for_status(self, response, url):
        """
        Raises FileNotFoundError for 404s, otherwise uses raise_for_status.
        """
        if response.status == 404:
            raise FileNotFoundError(url)
        response.raise_for_status()
230
+
231
    async def _cat_file(self, url, start=None, end=None, **kwargs):
        """Fetch the (optionally byte-ranged) contents of ``url`` as bytes."""
        kw = self.kwargs.copy()
        kw.update(kwargs)
        logger.debug(url)

        if start is not None or end is not None:
            if start == end:
                return b""
            headers = kw.pop("headers", {}).copy()

            # Translate start/end into an HTTP Range header value
            headers["Range"] = await self._process_limits(url, start, end)
            kw["headers"] = headers
        session = await self.set_session()
        async with session.get(self.encode_url(url), **kw) as r:
            # NOTE(review): the body is read *before* the status check —
            # presumably intentional; confirm before reordering.
            out = await r.read()
            self._raise_not_found_for_status(r, url)
        return out
248
+
249
    async def _get_file(
        self, rpath, lpath, chunk_size=5 * 2**20, callback=DEFAULT_CALLBACK, **kwargs
    ):
        """Stream the remote URL ``rpath`` to a local file path or file-like."""
        kw = self.kwargs.copy()
        kw.update(kwargs)
        logger.debug(rpath)
        session = await self.set_session()
        async with session.get(self.encode_url(rpath), **kw) as r:
            try:
                size = int(r.headers["content-length"])
            except (ValueError, KeyError):
                # Server did not supply a usable size; progress is indeterminate
                size = None

            callback.set_size(size)
            self._raise_not_found_for_status(r, rpath)
            if isfilelike(lpath):
                outfile = lpath
            else:
                outfile = open(lpath, "wb")  # noqa: ASYNC230

            try:
                # Copy in chunks until read() returns b"" (falsy)
                chunk = True
                while chunk:
                    chunk = await r.content.read(chunk_size)
                    outfile.write(chunk)
                    callback.relative_update(len(chunk))
            finally:
                # Only close handles we opened ourselves
                if not isfilelike(lpath):
                    outfile.close()
278
+
279
    async def _put_file(
        self,
        lpath,
        rpath,
        chunk_size=5 * 2**20,
        callback=DEFAULT_CALLBACK,
        method="post",
        mode="overwrite",
        **kwargs,
    ):
        """Upload a local file (or file-like object) to ``rpath`` via POST/PUT."""
        if mode != "overwrite":
            raise NotImplementedError("Exclusive write")

        async def gen_chunks():
            # Support passing arbitrary file-like objects
            # and use them instead of streams.
            if isinstance(lpath, io.IOBase):
                context = nullcontext(lpath)
                use_seek = False  # might not support seeking
            else:
                context = open(lpath, "rb")  # noqa: ASYNC230
                use_seek = True

            with context as f:
                if use_seek:
                    # Seek to EOF to learn the total size for progress reporting
                    callback.set_size(f.seek(0, 2))
                    f.seek(0)
                else:
                    callback.set_size(getattr(f, "size", None))

                chunk = f.read(chunk_size)
                while chunk:
                    yield chunk
                    callback.relative_update(len(chunk))
                    chunk = f.read(chunk_size)

        kw = self.kwargs.copy()
        kw.update(kwargs)
        session = await self.set_session()

        method = method.lower()
        if method not in ("post", "put"):
            raise ValueError(
                f"method has to be either 'post' or 'put', not: {method!r}"
            )

        # Stream the generator as the request body
        meth = getattr(session, method)
        async with meth(self.encode_url(rpath), data=gen_chunks(), **kw) as resp:
            self._raise_not_found_for_status(resp, rpath)
328
+
329
+ async def _exists(self, path, **kwargs):
330
+ kw = self.kwargs.copy()
331
+ kw.update(kwargs)
332
+ try:
333
+ logger.debug(path)
334
+ session = await self.set_session()
335
+ r = await session.get(self.encode_url(path), **kw)
336
+ async with r:
337
+ return r.status < 400
338
+ except aiohttp.ClientError:
339
+ return False
340
+
341
    async def _isfile(self, path, **kwargs):
        # Any reachable URL is treated as a file (HTTP has no real directories)
        return await self._exists(path, **kwargs)
343
+
344
    def _open(
        self,
        path,
        mode="rb",
        block_size=None,
        autocommit=None,  # XXX: This differs from the base class.
        cache_type=None,
        cache_options=None,
        size=None,
        **kwargs,
    ):
        """Make a file-like object

        Parameters
        ----------
        path: str
            Full URL with protocol
        mode: string
            must be "rb"
        block_size: int or None
            Bytes to download in one request; use instance value if None. If
            zero, will return a streaming Requests file-like instance.
        kwargs: key-value
            Any other parameters, passed to requests calls
        """
        if mode != "rb":
            raise NotImplementedError
        block_size = block_size if block_size is not None else self.block_size
        kw = self.kwargs.copy()
        kw["asynchronous"] = self.asynchronous
        kw.update(kwargs)
        info = {}
        # If no size was supplied, fetch info; dict.update returns None, so
        # the `or` chain falls through to the freshly populated info["size"].
        size = size or info.update(self.info(path, **kwargs)) or info["size"]
        session = sync(self.loop, self.set_session)
        # Random access needs a block size, a known size, and server support
        # for partial reads (info may carry "partial": False).
        if block_size and size and info.get("partial", True):
            return HTTPFile(
                self,
                path,
                session=session,
                block_size=block_size,
                mode=mode,
                size=size,
                cache_type=cache_type or self.cache_type,
                cache_options=cache_options or self.cache_options,
                loop=self.loop,
                **kw,
            )
        else:
            return HTTPStreamFile(
                self,
                path,
                mode=mode,
                loop=self.loop,
                session=session,
                **kw,
            )
400
+
401
    async def open_async(self, path, mode="rb", size=None, **kwargs):
        """Async counterpart of open(); returns a streamed async file object."""
        session = await self.set_session()
        if size is None:
            try:
                size = (await self._info(path, **kwargs))["size"]
            except FileNotFoundError:
                pass  # size stays None
        return AsyncStreamFile(
            self,
            path,
            loop=self.loop,
            session=session,
            size=size,
            **kwargs,
        )
416
+
417
    def ukey(self, url):
        """Unique identifier; assume HTTP files are static, unchanging"""
        # Hash of the URL plus instance request options and protocol
        return tokenize(url, self.kwargs, self.protocol)
420
+
421
    async def _info(self, url, **kwargs):
        """Get info of URL

        Tries to access location via HEAD, and then GET methods, but does
        not fetch the data.

        It is possible that the server does not supply any size information, in
        which case size will be given as None (and certain operations on the
        corresponding file will not work).
        """
        info = {}
        session = await self.set_session()

        # Try HEAD first (cheap); fall back to GET if it fails or lacks a size
        for policy in ["head", "get"]:
            try:
                info.update(
                    await _file_info(
                        self.encode_url(url),
                        size_policy=policy,
                        session=session,
                        **self.kwargs,
                        **kwargs,
                    )
                )
                if info.get("size") is not None:
                    break
            except Exception as exc:
                if policy == "get":
                    # If get failed, then raise a FileNotFoundError
                    raise FileNotFoundError(url) from exc
                logger.debug("", exc_info=exc)

        return {"name": url, "size": None, **info, "type": "file"}
454
+
455
    async def _glob(self, path, maxdepth=None, **kwargs):
        """
        Find files by glob-matching.

        This implementation is identical to the one in AbstractFileSystem,
        but "?" is not considered as a character for globbing, because it is
        so common in URLs, often identifying the "query" part.
        """
        if maxdepth is not None and maxdepth < 1:
            raise ValueError("maxdepth must be at least 1")
        import re

        ends_with_slash = path.endswith("/")  # _strip_protocol strips trailing slash
        path = self._strip_protocol(path)
        append_slash_to_dirname = ends_with_slash or path.endswith(("/**", "/*"))
        # Position of the first globbing character; len(path) when absent
        idx_star = path.find("*") if path.find("*") >= 0 else len(path)
        idx_brace = path.find("[") if path.find("[") >= 0 else len(path)

        min_idx = min(idx_star, idx_brace)

        detail = kwargs.pop("detail", False)

        if not has_magic(path):
            # No glob characters: reduce to a plain existence check
            if await self._exists(path, **kwargs):
                if not detail:
                    return [path]
                else:
                    return {path: await self._info(path, **kwargs)}
            else:
                if not detail:
                    return []  # glob of non-existent returns empty
                else:
                    return {}
        elif "/" in path[:min_idx]:
            # Search from the deepest non-glob directory prefix
            min_idx = path[:min_idx].rindex("/")
            root = path[: min_idx + 1]
            depth = path[min_idx + 1 :].count("/") + 1
        else:
            root = ""
            depth = path[min_idx + 1 :].count("/") + 1

        if "**" in path:
            if maxdepth is not None:
                idx_double_stars = path.find("**")
                depth_double_stars = path[idx_double_stars:].count("/") + 1
                depth = depth - depth_double_stars + maxdepth
            else:
                depth = None

        allpaths = await self._find(
            root, maxdepth=depth, withdirs=True, detail=True, **kwargs
        )

        pattern = glob_translate(path + ("/" if ends_with_slash else ""))
        pattern = re.compile(pattern)

        # Filter found paths by the translated glob pattern, normalising the
        # trailing slash on directory names unless the query asked for it
        out = {
            (
                p.rstrip("/")
                if not append_slash_to_dirname
                and info["type"] == "directory"
                and p.endswith("/")
                else p
            ): info
            for p, info in sorted(allpaths.items())
            if pattern.match(p.rstrip("/"))
        }

        if detail:
            return out
        else:
            return list(out)
527
+
528
+ async def _isdir(self, path):
529
+ # override, since all URLs are (also) files
530
+ try:
531
+ return bool(await self._ls(path))
532
+ except (FileNotFoundError, ValueError):
533
+ return False
534
+
535
    async def _pipe_file(self, path, value, mode="overwrite", **kwargs):
        """
        Write bytes to a remote file over HTTP.

        Parameters
        ----------
        path : str
            Target URL where the data should be written
        value : bytes
            Data to be written
        mode : str
            How to write to the file - 'overwrite' or 'append'
        **kwargs : dict
            Additional parameters to pass to the HTTP request
        """
        url = self._strip_protocol(path)
        # NOTE(review): `mode` is accepted but never consulted below - the
        # whole payload is always PUT, i.e. effectively "overwrite"; confirm
        # whether "append" should raise instead of silently overwriting.
        # NOTE: if the caller passed headers=, that dict is mutated in place.
        headers = kwargs.pop("headers", {})
        headers["Content-Length"] = str(len(value))

        session = await self.set_session()

        async with session.put(url, data=value, headers=headers, **kwargs) as r:
            r.raise_for_status()
558
+
559
+
560
class HTTPFile(AbstractBufferedFile):
    """
    A file-like object pointing to a remote HTTP(S) resource

    Supports only reading, with read-ahead of a predetermined block-size.

    In the case that the server does not supply the filesize, only reading of
    the complete file in one go is supported.

    Parameters
    ----------
    url: str
        Full URL of the remote resource, including the protocol
    session: aiohttp.ClientSession or None
        All calls will be made within this session, to avoid restarting
        connections where the server allows this
    block_size: int or None
        The amount of read-ahead to do, in bytes. Default is 5MB, or the value
        configured for the FileSystem creating this file
    size: None or int
        If given, this is the size of the file in bytes, and we don't attempt
        to call the server to find the value.
    kwargs: all other key-values are passed to requests calls.
    """

    def __init__(
        self,
        fs,
        url,
        session=None,
        block_size=None,
        mode="rb",
        cache_type="bytes",
        cache_options=None,
        size=None,
        loop=None,
        asynchronous=False,
        **kwargs,
    ):
        # Only read-only binary access is implemented.
        if mode != "rb":
            raise NotImplementedError("File mode not supported")
        self.asynchronous = asynchronous
        self.loop = loop
        self.url = url
        self.session = session
        # Pre-populate details so the base class does not call back to the
        # server for file info (size may be None if the server gave none).
        self.details = {"name": url, "size": size, "type": "file"}
        super().__init__(
            fs=fs,
            path=url,
            mode=mode,
            block_size=block_size,
            cache_type=cache_type,
            cache_options=cache_options,
            **kwargs,
        )

    def read(self, length=-1):
        """Read bytes from file

        Parameters
        ----------
        length: int
            Read up to this many bytes. If negative, read all content to end of
            file. If the server has not supplied the filesize, attempting to
            read only part of the data will raise a ValueError.
        """
        if (
            (length < 0 and self.loc == 0)  # explicit read all
            # but not when the size is known and fits into a block anyways
            and not (self.size is not None and self.size <= self.blocksize)
        ):
            self._fetch_all()
        if self.size is None:
            if length < 0:
                self._fetch_all()
        else:
            # Clamp the request to the known remaining bytes.
            length = min(self.size - self.loc, length)
        return super().read(length)

    async def async_fetch_all(self):
        """Read whole file in one shot, without caching

        This is only called when position is still at zero,
        and read() is called without a byte-count.
        """
        logger.debug(f"Fetch all for {self}")
        if not isinstance(self.cache, AllBytes):
            r = await self.session.get(self.fs.encode_url(self.url), **self.kwargs)
            async with r:
                r.raise_for_status()
                out = await r.read()
                # Replace whatever cache is configured with the full payload.
                self.cache = AllBytes(
                    size=len(out), fetcher=None, blocksize=None, data=out
                )
                self.size = len(out)

    _fetch_all = sync_wrapper(async_fetch_all)

    def _parse_content_range(self, headers):
        """Parse the Content-Range header

        Returns a (start, end, total) tuple of ints; each element is None
        when the header is absent, malformed, or uses "*".
        """
        s = headers.get("Content-Range", "")
        m = re.match(r"bytes (\d+-\d+|\*)/(\d+|\*)", s)
        if not m:
            return None, None, None

        if m[1] == "*":
            start = end = None
        else:
            start, end = [int(x) for x in m[1].split("-")]
        total = None if m[2] == "*" else int(m[2])
        return start, end, total

    async def async_fetch_range(self, start, end):
        """Download a block of data

        The expectation is that the server returns only the requested bytes,
        with HTTP code 206. If this is not the case, we first check the headers,
        and then stream the output - if the data size is bigger than we
        requested, an exception is raised.
        """
        logger.debug(f"Fetch range for {self}: {start}-{end}")
        kwargs = self.kwargs.copy()
        headers = kwargs.pop("headers", {}).copy()
        # HTTP Range headers are inclusive of the final byte, hence end - 1.
        headers["Range"] = f"bytes={start}-{end - 1}"
        logger.debug(f"{self.url} : {headers['Range']}")
        r = await self.session.get(
            self.fs.encode_url(self.url), headers=headers, **kwargs
        )
        async with r:
            if r.status == 416:
                # range request outside file
                return b""
            r.raise_for_status()

            # If the server has handled the range request, it should reply
            # with status 206 (partial content). But we'll guess that a suitable
            # Content-Range header or a Content-Length no more than the
            # requested range also mean we have got the desired range.
            response_is_range = (
                r.status == 206
                or self._parse_content_range(r.headers)[0] == start
                or int(r.headers.get("Content-Length", end + 1)) <= end - start
            )

            if response_is_range:
                # partial content, as expected
                out = await r.read()
            elif start > 0:
                raise ValueError(
                    "The HTTP server doesn't appear to support range requests. "
                    "Only reading this file from the beginning is supported. "
                    "Open with block_size=0 for a streaming file interface."
                )
            else:
                # Response is not a range, but we want the start of the file,
                # so we can read the required amount anyway.
                cl = 0
                out = []
                while True:
                    chunk = await r.content.read(2**20)
                    # data size unknown, let's read until we have enough
                    if chunk:
                        out.append(chunk)
                        cl += len(chunk)
                        if cl > end - start:
                            break
                    else:
                        break
                out = b"".join(out)[: end - start]
            return out

    _fetch_range = sync_wrapper(async_fetch_range)
732
+
733
+
734
# Glob specials recognised here: "*" and "[". "?" is deliberately excluded
# because it commonly introduces the query part of a URL.
magic_check = re.compile("([*[])")


def has_magic(s):
    """Return True if *s* contains glob special characters."""
    return magic_check.search(s) is not None
740
+
741
+
742
class HTTPStreamFile(AbstractBufferedFile):
    """Forward-only streaming reader for an HTTP(S) resource.

    Used when the size is unknown or block reading is disabled
    (``block_size=0``); seeking is not supported beyond no-ops.
    """

    def __init__(self, fs, url, mode="rb", loop=None, session=None, **kwargs):
        self.asynchronous = kwargs.pop("asynchronous", False)
        self.url = url
        self.loop = loop
        self.session = session
        if mode != "rb":
            raise ValueError
        # Size is unknown for a stream.
        self.details = {"name": url, "size": None}
        super().__init__(fs=fs, path=url, mode=mode, cache_type="none", **kwargs)

        async def cor():
            # Enter the response context manually; it is closed in close().
            r = await self.session.get(self.fs.encode_url(url), **kwargs).__aenter__()
            self.fs._raise_not_found_for_status(r, url)
            return r

        # Open the connection eagerly, blocking on the filesystem's loop.
        self.r = sync(self.loop, cor)
        self.loop = fs.loop

    def seek(self, loc, whence=0):
        # Only no-op seeks are permitted on a stream.
        if loc == 0 and whence == 1:
            return
        if loc == self.loc and whence == 0:
            return
        raise ValueError("Cannot seek streaming HTTP file")

    async def _read(self, num=-1):
        out = await self.r.content.read(num)
        self.loc += len(out)
        return out

    read = sync_wrapper(_read)

    async def _close(self):
        self.r.close()

    def close(self):
        # Fire-and-forget: schedules the response close on the loop without
        # waiting for it to complete.
        asyncio.run_coroutine_threadsafe(self._close(), self.loop)
        super().close()
781
+
782
+
783
class AsyncStreamFile(AbstractAsyncStreamedFile):
    """Async forward-only streaming reader for an HTTP(S) resource.

    The connection is opened lazily on first read and closed via close().
    """

    def __init__(
        self, fs, url, mode="rb", loop=None, session=None, size=None, **kwargs
    ):
        self.url = url
        self.session = session
        self.r = None  # response object, created on first read
        if mode != "rb":
            raise ValueError
        self.details = {"name": url, "size": None}
        self.kwargs = kwargs
        super().__init__(fs=fs, path=url, mode=mode, cache_type="none")
        self.size = size

    async def read(self, num=-1):
        if self.r is None:
            # Lazily open the connection; context entered manually and
            # closed in close().
            r = await self.session.get(
                self.fs.encode_url(self.url), **self.kwargs
            ).__aenter__()
            self.fs._raise_not_found_for_status(r, self.url)
            self.r = r
        out = await self.r.content.read(num)
        self.loc += len(out)
        return out

    async def close(self):
        if self.r is not None:
            self.r.close()
            self.r = None
        await super().close()
813
+
814
+
815
async def get_range(session, url, start, end, file=None, **kwargs):
    """Fetch bytes [start, end) of *url*.

    If *file* is given, write them into that local file at offset *start*
    (the file must already exist); otherwise return the bytes.
    """
    # explicit get a range when we know it must be safe
    kwargs = kwargs.copy()
    headers = kwargs.pop("headers", {}).copy()
    # HTTP ranges are inclusive, hence end - 1.
    headers["Range"] = f"bytes={start}-{end - 1}"
    r = await session.get(url, headers=headers, **kwargs)
    r.raise_for_status()
    async with r:
        out = await r.read()
    if file:
        # Blocking file I/O inside a coroutine is accepted here.
        with open(file, "r+b") as f:  # noqa: ASYNC230
            f.seek(start)
            f.write(out)
    else:
        return out
830
+
831
+
832
async def _file_info(url, session, size_policy="head", **kwargs):
    """Call HEAD on the server to get details about the file (size/checksum etc.)

    Default operation is to explicitly allow redirects and use encoding
    'identity' (no compression) to get the true size of the target.

    With ``size_policy="get"`` a GET request is issued instead of HEAD.
    Returns a dict that may contain "size", "mimetype", "partial", "url"
    and checksum-ish header values.
    """
    logger.debug("Retrieve file size for %s", url)
    kwargs = kwargs.copy()
    ar = kwargs.pop("allow_redirects", True)
    head = kwargs.get("headers", {}).copy()
    # Ask for uncompressed content so Content-Length reflects the true size.
    head["Accept-Encoding"] = "identity"
    kwargs["headers"] = head

    info = {}
    if size_policy == "head":
        r = await session.head(url, allow_redirects=ar, **kwargs)
    elif size_policy == "get":
        r = await session.get(url, allow_redirects=ar, **kwargs)
    else:
        raise TypeError(f'size_policy must be "head" or "get", got {size_policy}')
    async with r:
        r.raise_for_status()

        if "Content-Length" in r.headers:
            # Some servers may choose to ignore Accept-Encoding and return
            # compressed content, in which case the returned size is unreliable.
            if "Content-Encoding" not in r.headers or r.headers["Content-Encoding"] in [
                "identity",
                "",
            ]:
                info["size"] = int(r.headers["Content-Length"])
        elif "Content-Range" in r.headers:
            # "bytes a-b/total" -> total
            info["size"] = int(r.headers["Content-Range"].split("/")[1])

        if "Content-Type" in r.headers:
            # Strip any "; charset=..." suffix.
            info["mimetype"] = r.headers["Content-Type"].partition(";")[0]

        if r.headers.get("Accept-Ranges") == "none":
            # Some servers may explicitly discourage partial content requests, but
            # the lack of "Accept-Ranges" does not always indicate they would fail
            info["partial"] = False

        # Final URL after any redirects.
        info["url"] = str(r.url)

        for checksum_field in ["ETag", "Content-MD5", "Digest", "Last-Modified"]:
            if r.headers.get(checksum_field):
                info[checksum_field] = r.headers[checksum_field]

    return info
881
+
882
+
883
async def _file_size(url, session=None, *args, **kwargs):
    """Return the size of *url* in bytes, or None if the server gives none."""
    if session is None:
        session = await get_client()
    info = await _file_info(url, *args, session=session, **kwargs)
    return info.get("size")


file_size = sync_wrapper(_file_size)
tool_server/.venv/lib/python3.12/site-packages/fsspec/implementations/http_sync.py ADDED
@@ -0,0 +1,931 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """This file is largely copied from http.py"""
2
+
3
+ import io
4
+ import logging
5
+ import re
6
+ import urllib.error
7
+ import urllib.parse
8
+ from copy import copy
9
+ from json import dumps, loads
10
+ from urllib.parse import urlparse
11
+
12
+ try:
13
+ import yarl
14
+ except (ImportError, ModuleNotFoundError, OSError):
15
+ yarl = False
16
+
17
+ from fsspec.callbacks import _DEFAULT_CALLBACK
18
+ from fsspec.registry import register_implementation
19
+ from fsspec.spec import AbstractBufferedFile, AbstractFileSystem
20
+ from fsspec.utils import DEFAULT_BLOCK_SIZE, isfilelike, nullcontext, tokenize
21
+
22
+ from ..caching import AllBytes
23
+
24
+ # https://stackoverflow.com/a/15926317/3821154
25
+ ex = re.compile(r"""<(a|A)\s+(?:[^>]*?\s+)?(href|HREF)=["'](?P<url>[^"']+)""")
26
+ ex2 = re.compile(r"""(?P<url>http[s]?://[-a-zA-Z0-9@:%_+.~#?&/=]+)""")
27
+ logger = logging.getLogger("fsspec.http")
28
+
29
+
30
# Raised by ResponseProxy.raise_for_status for error responses (status >= 400).
class JsHttpException(urllib.error.HTTPError): ...
31
+
32
+
33
class StreamIO(io.BytesIO):
    # fake class, so you can set attributes on it
    # will eventually actually stream
    ...
37
+
38
+
39
class ResponseProxy:
    """Wrap a completed XMLHttpRequest so it looks like a requests response."""

    def __init__(self, req, stream=False):
        # req: a finished (synchronous) XMLHttpRequest instance
        self.request = req
        self.stream = stream
        self._data = None  # lazily fetched body
        self._headers = None  # lazily parsed headers

    @property
    def raw(self):
        """Body bytes (or a StreamIO wrapper when streaming), fetched lazily."""
        if self._data is None:
            b = self.request.response.to_bytes()
            if self.stream:
                self._data = StreamIO(b)
            else:
                self._data = b
        return self._data

    def close(self):
        # Reset to None rather than deleting the attribute: the original
        # `del self._data` made any later .raw access raise AttributeError
        # instead of re-fetching the body.
        self._data = None

    @property
    def headers(self):
        """Response headers as a plain dict, parsed once."""
        if self._headers is None:
            # Split on the FIRST ": " only - header values may themselves
            # contain ": " (dates, Link headers), which previously made
            # dict() fail on a 3-element split result.
            self._headers = dict(
                line.split(": ", 1)
                for line in self.request.getAllResponseHeaders().strip().split("\r\n")
            )
        return self._headers

    @property
    def status_code(self):
        return int(self.request.status)

    def raise_for_status(self):
        """Raise JsHttpException for any status >= 400."""
        if not self.ok:
            raise JsHttpException(
                self.url, self.status_code, self.reason, self.headers, None
            )

    def iter_content(self, chunksize, *_, **__):
        """Yield the body in chunks of *chunksize* bytes."""
        while True:
            out = self.raw.read(chunksize)
            if out:
                yield out
            else:
                break

    @property
    def reason(self):
        return self.request.statusText

    @property
    def ok(self):
        return self.status_code < 400

    @property
    def url(self):
        return self.request.response.responseURL

    @property
    def text(self):
        # TODO: encoding from headers
        return self.content.decode()

    @property
    def content(self):
        # Force non-streaming so raw returns plain bytes.
        self.stream = False
        return self.raw

    def json(self):
        return loads(self.text)
115
+
116
+
117
class RequestsSessionShim:
    """Minimal requests.Session look-alike backed by the browser's
    synchronous XMLHttpRequest (pyodide/pyscript environments only)."""

    def __init__(self):
        # Kept for interface compatibility with requests.Session.
        self.headers = {}

    def request(
        self,
        method,
        url,
        params=None,
        data=None,
        headers=None,
        cookies=None,
        files=None,
        auth=None,
        timeout=None,
        allow_redirects=None,
        proxies=None,
        hooks=None,
        stream=None,
        verify=None,
        cert=None,
        json=None,
    ):
        """Issue a blocking HTTP request via XMLHttpRequest.

        Returns a ResponseProxy. Raises NotImplementedError for features the
        JS shim cannot honour and ValueError when both data= and json= are
        supplied.
        """
        from js import Blob, XMLHttpRequest

        logger.debug("JS request: %s %s", method, url)

        if cert or verify or proxies or files or cookies or hooks:
            raise NotImplementedError
        if data and json:
            raise ValueError("Use json= or data=, not both")
        req = XMLHttpRequest.new()
        extra = auth if auth else ()
        if params:
            url = f"{url}?{urllib.parse.urlencode(params)}"
        req.open(method, url, False, *extra)  # False => synchronous request
        if timeout:
            req.timeout = timeout
        if headers:
            for k, v in headers.items():
                req.setRequestHeader(k, v)

        req.setRequestHeader("Accept", "application/octet-stream")
        req.responseType = "arraybuffer"
        if json:
            # Fix: serialise the json= payload - `data` is always None in
            # this branch, so the original dumps(data) sent "null".
            # Blob options must use the string key "type"; the original bare
            # `type` used the builtin class object as the key.
            blob = Blob.new([dumps(json)], {"type": "application/json"})
            req.send(blob)
        elif data:
            if isinstance(data, io.IOBase):
                data = data.read()
            blob = Blob.new([data], {"type": "application/octet-stream"})
            req.send(blob)
        else:
            req.send(None)
        return ResponseProxy(req, stream=stream)

    def get(self, url, **kwargs):
        return self.request("GET", url, **kwargs)

    def head(self, url, **kwargs):
        return self.request("HEAD", url, **kwargs)

    def post(self, url, **kwargs):
        # Fix: the method string was "POST}" (typo), an invalid HTTP method.
        return self.request("POST", url, **kwargs)

    def put(self, url, **kwargs):
        return self.request("PUT", url, **kwargs)

    def patch(self, url, **kwargs):
        return self.request("PATCH", url, **kwargs)

    def delete(self, url, **kwargs):
        return self.request("DELETE", url, **kwargs)
190
+
191
+
192
+ class HTTPFileSystem(AbstractFileSystem):
193
+ """
194
+ Simple File-System for fetching data via HTTP(S)
195
+
196
+ This is the BLOCKING version of the normal HTTPFileSystem. It uses
197
+ requests in normal python and the JS runtime in pyodide.
198
+
199
+ ***This implementation is extremely experimental, do not use unless
200
+ you are testing pyodide/pyscript integration***
201
+ """
202
+
203
+ protocol = ("http", "https", "sync-http", "sync-https")
204
+ sep = "/"
205
+
206
    def __init__(
        self,
        simple_links=True,
        block_size=None,
        same_scheme=True,
        cache_type="readahead",
        cache_options=None,
        client_kwargs=None,
        encoded=False,
        **storage_options,
    ):
        """

        Parameters
        ----------
        block_size: int
            Blocks to read bytes; if 0, will default to raw requests file-like
            objects instead of HTTPFile instances
        simple_links: bool
            If True, will consider both HTML <a> tags and anything that looks
            like a URL; if False, will consider only the former.
        same_scheme: True
            When doing ls/glob, if this is True, only consider paths that have
            http/https matching the input URLs.
        size_policy: this argument is deprecated
        client_kwargs: dict
            Passed to aiohttp.ClientSession, see
            https://docs.aiohttp.org/en/stable/client_reference.html
            For example, ``{'auth': aiohttp.BasicAuth('user', 'pass')}``
        storage_options: key-value
            Any other parameters passed on to requests
        cache_type, cache_options: defaults used in open
        """
        # NOTE(review): `self` is also passed positionally here (absorbed by
        # the base class's *args) - mirrors the async implementation; confirm.
        super().__init__(self, **storage_options)
        self.block_size = block_size if block_size is not None else DEFAULT_BLOCK_SIZE
        self.simple_links = simple_links
        self.same_schema = same_scheme
        self.cache_type = cache_type
        self.cache_options = cache_options
        self.client_kwargs = client_kwargs or {}
        self.encoded = encoded
        self.kwargs = storage_options

        try:
            # In pyodide/pyscript the `js` module exists: use the XHR shim.
            import js  # noqa: F401

            logger.debug("Starting JS session")
            self.session = RequestsSessionShim()
            self.js = True
        except Exception as e:
            import requests

            logger.debug("Starting cpython session because of: %s", e)
            # NOTE(review): requests.Session() accepts no constructor kwargs;
            # a non-empty client_kwargs would raise TypeError here - confirm.
            self.session = requests.Session(**(client_kwargs or {}))
            self.js = False

        # Strip listing-cache options out of what gets forwarded to requests.
        # This reassigns self.kwargs (set above) to the filtered copy.
        request_options = copy(storage_options)
        self.use_listings_cache = request_options.pop("use_listings_cache", False)
        request_options.pop("listings_expiry_time", None)
        request_options.pop("max_paths", None)
        request_options.pop("skip_instance_cache", None)
        self.kwargs = request_options
268
+
269
    @property
    def fsid(self):
        # Constant filesystem identifier for this (sync) HTTP implementation.
        return "sync-http"
272
+
273
+ def encode_url(self, url):
274
+ if yarl:
275
+ return yarl.URL(url, encoded=self.encoded)
276
+ return url
277
+
278
+ @classmethod
279
+ def _strip_protocol(cls, path: str) -> str:
280
+ """For HTTP, we always want to keep the full URL"""
281
+ path = path.replace("sync-http://", "http://").replace(
282
+ "sync-https://", "https://"
283
+ )
284
+ return path
285
+
286
    @classmethod
    def _parent(cls, path):
        # override, since _strip_protocol is different for URLs
        par = super()._parent(path)
        # Anything not longer than "http://" has no meaningful parent.
        if len(par) > 7:  # "http://..."
            return par
        return ""
293
+
294
+ def _ls_real(self, url, detail=True, **kwargs):
295
+ # ignoring URL-encoded arguments
296
+ kw = self.kwargs.copy()
297
+ kw.update(kwargs)
298
+ logger.debug(url)
299
+ r = self.session.get(self.encode_url(url), **self.kwargs)
300
+ self._raise_not_found_for_status(r, url)
301
+ text = r.text
302
+ if self.simple_links:
303
+ links = ex2.findall(text) + [u[2] for u in ex.findall(text)]
304
+ else:
305
+ links = [u[2] for u in ex.findall(text)]
306
+ out = set()
307
+ parts = urlparse(url)
308
+ for l in links:
309
+ if isinstance(l, tuple):
310
+ l = l[1]
311
+ if l.startswith("/") and len(l) > 1:
312
+ # absolute URL on this server
313
+ l = parts.scheme + "://" + parts.netloc + l
314
+ if l.startswith("http"):
315
+ if self.same_schema and l.startswith(url.rstrip("/") + "/"):
316
+ out.add(l)
317
+ elif l.replace("https", "http").startswith(
318
+ url.replace("https", "http").rstrip("/") + "/"
319
+ ):
320
+ # allowed to cross http <-> https
321
+ out.add(l)
322
+ else:
323
+ if l not in ["..", "../"]:
324
+ # Ignore FTP-like "parent"
325
+ out.add("/".join([url.rstrip("/"), l.lstrip("/")]))
326
+ if not out and url.endswith("/"):
327
+ out = self._ls_real(url.rstrip("/"), detail=False)
328
+ if detail:
329
+ return [
330
+ {
331
+ "name": u,
332
+ "size": None,
333
+ "type": "directory" if u.endswith("/") else "file",
334
+ }
335
+ for u in out
336
+ ]
337
+ else:
338
+ return sorted(out)
339
+
340
    def ls(self, url, detail=True, **kwargs):
        """List links found at *url*, using the directory cache when enabled."""
        if self.use_listings_cache and url in self.dircache:
            out = self.dircache[url]
        else:
            out = self._ls_real(url, detail=detail, **kwargs)
            # The fresh listing is always stored; use_listings_cache only
            # gates reads from the cache, not writes to it.
            # NOTE(review): the cached entry is not keyed on `detail`, so a
            # detail=True result may later be served for detail=False - confirm.
            self.dircache[url] = out
        return out
347
+
348
+ def _raise_not_found_for_status(self, response, url):
349
+ """
350
+ Raises FileNotFoundError for 404s, otherwise uses raise_for_status.
351
+ """
352
+ if response.status_code == 404:
353
+ raise FileNotFoundError(url)
354
+ response.raise_for_status()
355
+
356
    def cat_file(self, url, start=None, end=None, **kwargs):
        """Return the bytes of *url*, optionally restricted to [start, end)."""
        kw = self.kwargs.copy()
        kw.update(kwargs)
        logger.debug(url)

        if start is not None or end is not None:
            if start == end:
                # Empty range: avoid a pointless request.
                return b""
            headers = kw.pop("headers", {}).copy()

            # Translate the python-style half-open range into an HTTP Range.
            headers["Range"] = self._process_limits(url, start, end)
            kw["headers"] = headers
        r = self.session.get(self.encode_url(url), **kw)
        self._raise_not_found_for_status(r, url)
        return r.content
371
+
372
+ def get_file(
373
+ self, rpath, lpath, chunk_size=5 * 2**20, callback=_DEFAULT_CALLBACK, **kwargs
374
+ ):
375
+ kw = self.kwargs.copy()
376
+ kw.update(kwargs)
377
+ logger.debug(rpath)
378
+ r = self.session.get(self.encode_url(rpath), **kw)
379
+ try:
380
+ size = int(
381
+ r.headers.get("content-length", None)
382
+ or r.headers.get("Content-Length", None)
383
+ )
384
+ except (ValueError, KeyError, TypeError):
385
+ size = None
386
+
387
+ callback.set_size(size)
388
+ self._raise_not_found_for_status(r, rpath)
389
+ if not isfilelike(lpath):
390
+ lpath = open(lpath, "wb")
391
+ for chunk in r.iter_content(chunk_size, decode_unicode=False):
392
+ lpath.write(chunk)
393
+ callback.relative_update(len(chunk))
394
+
395
    def put_file(
        self,
        lpath,
        rpath,
        chunk_size=5 * 2**20,
        callback=_DEFAULT_CALLBACK,
        method="post",
        **kwargs,
    ):
        """Upload *lpath* (path or file-like) to *rpath* via POST or PUT,
        streaming it in chunks of *chunk_size* bytes."""

        def gen_chunks():
            # Support passing arbitrary file-like objects
            # and use them instead of streams.
            if isinstance(lpath, io.IOBase):
                context = nullcontext(lpath)
                use_seek = False  # might not support seeking
            else:
                context = open(lpath, "rb")
                use_seek = True

            with context as f:
                if use_seek:
                    # seek-to-end gives the total size for progress reporting
                    callback.set_size(f.seek(0, 2))
                    f.seek(0)
                else:
                    callback.set_size(getattr(f, "size", None))

                chunk = f.read(chunk_size)
                while chunk:
                    yield chunk
                    callback.relative_update(len(chunk))
                    chunk = f.read(chunk_size)

        kw = self.kwargs.copy()
        kw.update(kwargs)

        method = method.lower()
        if method not in ("post", "put"):
            raise ValueError(
                f"method has to be either 'post' or 'put', not: {method!r}"
            )

        meth = getattr(self.session, method)
        resp = meth(rpath, data=gen_chunks(), **kw)
        self._raise_not_found_for_status(resp, rpath)
439
+
440
    def _process_limits(self, url, start, end):
        """Helper for "Range"-based _cat_file

        Converts a python-style half-open [start, end) pair (either may be
        None or negative) into an HTTP "bytes=a-b" header value. Negative
        offsets trigger an info() call to learn the file size, except the
        (start<0, end=None) case which maps to an HTTP suffix range.
        """
        size = None
        suff = False
        if start is not None and start < 0:
            # if start is negative and end None, end is the "suffix length"
            if end is None:
                end = -start
                start = ""
                suff = True
            else:
                size = size or self.info(url)["size"]
                start = size + start
        elif start is None:
            start = 0
        if not suff:
            if end is not None and end < 0:
                if start is not None:
                    size = size or self.info(url)["size"]
                    end = size + end
            elif end is None:
                end = ""  # open-ended range
            if isinstance(end, int):
                end -= 1  # bytes range is inclusive
        return f"bytes={start}-{end}"
465
+
466
+ def exists(self, path, **kwargs):
467
+ kw = self.kwargs.copy()
468
+ kw.update(kwargs)
469
+ try:
470
+ logger.debug(path)
471
+ r = self.session.get(self.encode_url(path), **kw)
472
+ return r.status_code < 400
473
+ except Exception:
474
+ return False
475
+
476
    def isfile(self, path, **kwargs):
        # Every reachable URL is considered a file.
        return self.exists(path, **kwargs)
478
+
479
    def _open(
        self,
        path,
        mode="rb",
        block_size=None,
        autocommit=None,  # XXX: This differs from the base class.
        cache_type=None,
        cache_options=None,
        size=None,
        **kwargs,
    ):
        """Make a file-like object

        Parameters
        ----------
        path: str
            Full URL with protocol
        mode: string
            must be "rb"
        block_size: int or None
            Bytes to download in one request; use instance value if None. If
            zero, will return a streaming Requests file-like instance.
        kwargs: key-value
            Any other parameters, passed to requests calls
        """
        if mode != "rb":
            raise NotImplementedError
        block_size = block_size if block_size is not None else self.block_size
        kw = self.kwargs.copy()
        kw.update(kwargs)
        # info() may raise FileNotFoundError; size may come back None.
        size = size or self.info(path, **kwargs)["size"]
        if block_size and size:
            # Known size + block reading: random-access HTTPFile.
            return HTTPFile(
                self,
                path,
                session=self.session,
                block_size=block_size,
                mode=mode,
                size=size,
                cache_type=cache_type or self.cache_type,
                cache_options=cache_options or self.cache_options,
                **kw,
            )
        else:
            # Unknown size or block_size == 0: forward-only stream.
            return HTTPStreamFile(
                self,
                path,
                mode=mode,
                session=self.session,
                **kw,
            )
530
+
531
    def ukey(self, url):
        """Unique identifier; assume HTTP files are static, unchanging"""
        # Derived purely from the URL and instance configuration.
        return tokenize(url, self.kwargs, self.protocol)
534
+
535
    def info(self, url, **kwargs):
        """Get info of URL

        Tries to access location via HEAD, and then GET methods, but does
        not fetch the data.

        It is possible that the server does not supply any size information, in
        which case size will be given as None (and certain operations on the
        corresponding file will not work).
        """
        info = {}
        # Try HEAD first; fall back to GET only if HEAD gave no size.
        for policy in ["head", "get"]:
            try:
                info.update(
                    _file_info(
                        self.encode_url(url),
                        size_policy=policy,
                        session=self.session,
                        **self.kwargs,
                        **kwargs,
                    )
                )
                if info.get("size") is not None:
                    break
            except Exception as exc:
                if policy == "get":
                    # If get failed, then raise a FileNotFoundError
                    raise FileNotFoundError(url) from exc
                logger.debug(str(exc))

        # "size": None acts as a default, overridden by info when present.
        return {"name": url, "size": None, **info, "type": "file"}
566
+
567
    def glob(self, path, maxdepth=None, **kwargs):
        """
        Find files by glob-matching.

        This implementation is identical to the one in AbstractFileSystem,
        but "?" is not considered as a character for globbing, because it is
        so common in URLs, often identifying the "query" part.
        """
        import re

        ends = path.endswith("/")
        path = self._strip_protocol(path)
        # Position of the first glob special char (or end of string if none).
        indstar = path.find("*") if path.find("*") >= 0 else len(path)
        indbrace = path.find("[") if path.find("[") >= 0 else len(path)

        ind = min(indstar, indbrace)

        detail = kwargs.pop("detail", False)

        if not has_magic(path):
            # No glob characters: either list the directory or check existence.
            root = path
            depth = 1
            if ends:
                path += "/*"
            elif self.exists(path):
                if not detail:
                    return [path]
                else:
                    return {path: self.info(path)}
            else:
                if not detail:
                    return []  # glob of non-existent returns empty
                else:
                    return {}
        elif "/" in path[:ind]:
            # Walk from the last "/" before the first magic character.
            ind2 = path[:ind].rindex("/")
            root = path[: ind2 + 1]
            depth = None if "**" in path else path[ind2 + 1 :].count("/") + 1
        else:
            root = ""
            depth = None if "**" in path else path[ind + 1 :].count("/") + 1

        allpaths = self.find(
            root, maxdepth=maxdepth or depth, withdirs=True, detail=True, **kwargs
        )
        # Escape characters special to python regex, leaving our supported
        # special characters in place.
        # See https://www.gnu.org/software/bash/manual/html_node/Pattern-Matching.html
        # for shell globbing details.
        pattern = (
            "^"
            + (
                path.replace("\\", r"\\")
                .replace(".", r"\.")
                .replace("+", r"\+")
                .replace("//", "/")
                .replace("(", r"\(")
                .replace(")", r"\)")
                .replace("|", r"\|")
                .replace("^", r"\^")
                .replace("$", r"\$")
                .replace("{", r"\{")
                .replace("}", r"\}")
                .rstrip("/")
            )
            + "$"
        )
        # "**" matches across path separators, "*" within a single component.
        pattern = re.sub("[*]{2}", "=PLACEHOLDER=", pattern)
        pattern = re.sub("[*]", "[^/]*", pattern)
        pattern = re.compile(pattern.replace("=PLACEHOLDER=", ".*"))
        out = {
            p: allpaths[p]
            for p in sorted(allpaths)
            if pattern.match(p.replace("//", "/").rstrip("/"))
        }
        if detail:
            return out
        else:
            return list(out)
646
+
647
+ def isdir(self, path):
648
+ # override, since all URLs are (also) files
649
+ try:
650
+ return bool(self.ls(path))
651
+ except (FileNotFoundError, ValueError):
652
+ return False
653
+
654
+
655
class HTTPFile(AbstractBufferedFile):
    """
    A file-like object pointing to a remote HTTP(S) resource

    Supports only reading, with read-ahead of a predetermined block-size.

    In the case that the server does not supply the filesize, only reading of
    the complete file in one go is supported.

    Parameters
    ----------
    url: str
        Full URL of the remote resource, including the protocol
    session: requests.Session or None
        All calls will be made within this session, to avoid restarting
        connections where the server allows this
    block_size: int or None
        The amount of read-ahead to do, in bytes. Default is 5MB, or the value
        configured for the FileSystem creating this file
    size: None or int
        If given, this is the size of the file in bytes, and we don't attempt
        to call the server to find the value.
    kwargs: all other key-values are passed to requests calls.
    """

    def __init__(
        self,
        fs,
        url,
        session=None,
        block_size=None,
        mode="rb",
        cache_type="bytes",
        cache_options=None,
        size=None,
        **kwargs,
    ):
        if mode != "rb":
            raise NotImplementedError("File mode not supported")
        self.url = url
        self.session = session
        # Pre-populate details so the parent class does not need to query
        # the server for the size when it was supplied by the caller.
        self.details = {"name": url, "size": size, "type": "file"}
        super().__init__(
            fs=fs,
            path=url,
            mode=mode,
            block_size=block_size,
            cache_type=cache_type,
            cache_options=cache_options,
            **kwargs,
        )

    def read(self, length=-1):
        """Read bytes from file

        Parameters
        ----------
        length: int
            Read up to this many bytes. If negative, read all content to end of
            file. If the server has not supplied the filesize, attempting to
            read only part of the data will raise a ValueError.
        """
        if (
            (length < 0 and self.loc == 0)  # explicit read all
            # but not when the size is known and fits into a block anyways
            and not (self.size is not None and self.size <= self.blocksize)
        ):
            self._fetch_all()
        if self.size is None:
            if length < 0:
                self._fetch_all()
        else:
            # Clamp the request to the known remaining bytes.
            length = min(self.size - self.loc, length)
        return super().read(length)

    def _fetch_all(self):
        """Read whole file in one shot, without caching

        This is only called when position is still at zero,
        and read() is called without a byte-count.
        """
        logger.debug(f"Fetch all for {self}")
        if not isinstance(self.cache, AllBytes):
            r = self.session.get(self.fs.encode_url(self.url), **self.kwargs)
            r.raise_for_status()
            out = r.content
            # Replace the block cache with one holding the entire payload.
            self.cache = AllBytes(size=len(out), fetcher=None, blocksize=None, data=out)
            self.size = len(out)

    def _parse_content_range(self, headers):
        """Parse the Content-Range header

        Returns ``(start, end, total)``; elements are None when the header is
        absent, malformed, or the corresponding field is reported as "*".
        """
        s = headers.get("Content-Range", "")
        m = re.match(r"bytes (\d+-\d+|\*)/(\d+|\*)", s)
        if not m:
            return None, None, None

        if m[1] == "*":
            start = end = None
        else:
            start, end = [int(x) for x in m[1].split("-")]
        total = None if m[2] == "*" else int(m[2])
        return start, end, total

    def _fetch_range(self, start, end):
        """Download a block of data

        The expectation is that the server returns only the requested bytes,
        with HTTP code 206. If this is not the case, we first check the headers,
        and then stream the output - if the data size is bigger than we
        requested, an exception is raised.
        """
        logger.debug(f"Fetch range for {self}: {start}-{end}")
        kwargs = self.kwargs.copy()
        headers = kwargs.pop("headers", {}).copy()
        # HTTP ranges are inclusive at both ends, hence end - 1.
        headers["Range"] = f"bytes={start}-{end - 1}"
        logger.debug("%s : %s", self.url, headers["Range"])
        r = self.session.get(self.fs.encode_url(self.url), headers=headers, **kwargs)
        if r.status_code == 416:
            # range request outside file
            return b""
        r.raise_for_status()

        # If the server has handled the range request, it should reply
        # with status 206 (partial content). But we'll guess that a suitable
        # Content-Range header or a Content-Length no more than the
        # requested range also mean we have got the desired range.
        cl = r.headers.get("Content-Length", r.headers.get("content-length", end + 1))
        response_is_range = (
            r.status_code == 206
            or self._parse_content_range(r.headers)[0] == start
            or int(cl) <= end - start
        )

        if response_is_range:
            # partial content, as expected
            out = r.content
        elif start > 0:
            raise ValueError(
                "The HTTP server doesn't appear to support range requests. "
                "Only reading this file from the beginning is supported. "
                "Open with block_size=0 for a streaming file interface."
            )
        else:
            # Response is not a range, but we want the start of the file,
            # so we can read the required amount anyway.
            cl = 0
            out = []
            for chunk in r.iter_content(2**20, False):
                out.append(chunk)
                cl += len(chunk)
            out = b"".join(out)[: end - start]
        return out
807
+
808
+
809
# Glob special characters recognized here: '*' and '[' only ('?' is
# deliberately excluded, since it commonly introduces a URL query string).
magic_check = re.compile("([*[])")


def has_magic(s):
    """Return True if *s* contains a glob special character ('*' or '[')."""
    return magic_check.search(s) is not None
815
+
816
+
817
class HTTPStreamFile(AbstractBufferedFile):
    """Forward-only, streaming reader of an HTTP(S) resource.

    The body is consumed via a chunk iterator; the total size is unknown
    (``size`` is None) and seeking is not supported.
    """

    def __init__(self, fs, url, mode="rb", session=None, **kwargs):
        self.url = url
        self.session = session
        if mode != "rb":
            raise ValueError
        self.details = {"name": url, "size": None}
        super().__init__(fs=fs, path=url, mode=mode, cache_type="readahead", **kwargs)

        # Open the request up-front with streaming enabled, so a missing
        # resource raises here rather than on the first read().
        r = self.session.get(self.fs.encode_url(url), stream=True, **kwargs)
        self.fs._raise_not_found_for_status(r, url)
        self.it = r.iter_content(1024, False)  # iterator of 1 kiB chunks
        self.leftover = b""  # bytes fetched but not yet handed to the caller

        self.r = r

    def seek(self, *args, **kwargs):
        # The underlying response iterator cannot rewind.
        raise ValueError("Cannot seek streaming HTTP file")

    def read(self, num=-1):
        """Read up to *num* bytes; read all remaining bytes if negative."""
        bufs = [self.leftover]
        leng = len(self.leftover)
        # Pull chunks until enough bytes are buffered, or the stream ends.
        while leng < num or num < 0:
            try:
                out = self.it.__next__()
            except StopIteration:
                break
            if out:
                bufs.append(out)
            else:
                break
            leng += len(out)
        out = b"".join(bufs)
        if num >= 0:
            # Retain any over-read bytes for the next call.
            self.leftover = out[num:]
            out = out[:num]
        else:
            self.leftover = b""
        self.loc += len(out)
        return out

    def close(self):
        self.r.close()
        self.closed = True
861
+
862
+
863
def get_range(session, url, start, end, **kwargs):
    """Fetch bytes ``[start, end)`` of *url* with an explicit Range request.

    Only used when a range request is known to be safe; raises on HTTP error.
    """
    opts = kwargs.copy()
    headers = opts.pop("headers", {}).copy()
    # HTTP ranges are inclusive at both ends, hence end - 1.
    headers["Range"] = f"bytes={start}-{end - 1}"
    resp = session.get(url, headers=headers, **opts)
    resp.raise_for_status()
    return resp.content
871
+
872
+
873
def _file_info(url, session, size_policy="head", **kwargs):
    """Call HEAD on the server to get details about the file (size/checksum etc.)

    Default operation is to explicitly allow redirects and use encoding
    'identity' (no compression) to get the true size of the target.
    """
    logger.debug("Retrieve file size for %s", url)
    opts = kwargs.copy()
    allow_redirects = opts.pop("allow_redirects", True)
    headers = opts.get("headers", {}).copy()
    # TODO: not allowed in JS
    # headers["Accept-Encoding"] = "identity"
    opts["headers"] = headers

    if size_policy == "head":
        r = session.head(url, allow_redirects=allow_redirects, **opts)
    elif size_policy == "get":
        r = session.get(url, allow_redirects=allow_redirects, **opts)
    else:
        raise TypeError(f'size_policy must be "head" or "get", got {size_policy}')
    r.raise_for_status()

    # TODO:
    # recognise lack of 'Accept-Ranges',
    # or 'Accept-Ranges': 'none' (not 'bytes')
    # to mean streaming only, no random access => return None
    info = {}
    # Check size headers in priority order; Content-Range carries the total
    # after the slash (e.g. "bytes 0-0/12345").
    for header in (
        "Content-Length",
        "Content-Range",
        "content-length",
        "content-range",
    ):
        if header in r.headers:
            value = r.headers[header]
            if header.lower() == "content-range":
                value = value.split("/")[1]
            info["size"] = int(value)
            break

    for checksum_field in ("ETag", "Content-MD5", "Digest"):
        if r.headers.get(checksum_field):
            info[checksum_field] = r.headers[checksum_field]

    return info
914
+
915
+
916
# importing this is enough to register it
def register():
    """Install this implementation for http/https and the sync-* protocols."""
    for protocol in ("http", "https", "sync-http", "sync-https"):
        register_implementation(protocol, HTTPFileSystem, clobber=True)


register()
925
+
926
+
927
def unregister():
    """Restore fsspec's default (async) HTTP implementation for http/https."""
    from fsspec.implementations.http import HTTPFileSystem

    register_implementation("http", HTTPFileSystem, clobber=True)
    register_implementation("https", HTTPFileSystem, clobber=True)