ZTWHHH commited on
Commit
d6487e0
·
verified ·
1 Parent(s): 7489a61

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. mgm/lib/python3.10/site-packages/cmake/data/doc/cmake-3.31/Copyright.txt +136 -0
  2. mgm/lib/python3.10/site-packages/cmake/data/doc/cmake-3.31/cmcppdap/NOTICE +5 -0
  3. mgm/lib/python3.10/site-packages/cmake/data/doc/cmake-3.31/cmcurl/COPYING +22 -0
  4. mgm/lib/python3.10/site-packages/cmake/data/doc/cmake-3.31/cmlibarchive/COPYING +65 -0
  5. mgm/lib/python3.10/site-packages/cmake/data/doc/cmake-3.31/cmsys/Copyright.txt +38 -0
  6. mgm/lib/python3.10/site-packages/cmake/data/doc/cmake-3.31/cmzlib/Copyright.txt +23 -0
  7. mgm/lib/python3.10/site-packages/fsspec/__init__.py +69 -0
  8. mgm/lib/python3.10/site-packages/fsspec/__pycache__/_version.cpython-310.pyc +0 -0
  9. mgm/lib/python3.10/site-packages/fsspec/__pycache__/gui.cpython-310.pyc +0 -0
  10. mgm/lib/python3.10/site-packages/fsspec/__pycache__/registry.cpython-310.pyc +0 -0
  11. mgm/lib/python3.10/site-packages/fsspec/_version.py +16 -0
  12. mgm/lib/python3.10/site-packages/fsspec/caching.py +966 -0
  13. mgm/lib/python3.10/site-packages/fsspec/callbacks.py +324 -0
  14. mgm/lib/python3.10/site-packages/fsspec/compression.py +175 -0
  15. mgm/lib/python3.10/site-packages/fsspec/conftest.py +55 -0
  16. mgm/lib/python3.10/site-packages/fsspec/dircache.py +98 -0
  17. mgm/lib/python3.10/site-packages/fsspec/gui.py +416 -0
  18. mgm/lib/python3.10/site-packages/fsspec/implementations/__init__.py +0 -0
  19. mgm/lib/python3.10/site-packages/fsspec/implementations/__pycache__/__init__.cpython-310.pyc +0 -0
  20. mgm/lib/python3.10/site-packages/fsspec/implementations/__pycache__/cached.cpython-310.pyc +0 -0
  21. mgm/lib/python3.10/site-packages/fsspec/implementations/__pycache__/dask.cpython-310.pyc +0 -0
  22. mgm/lib/python3.10/site-packages/fsspec/implementations/__pycache__/dbfs.cpython-310.pyc +0 -0
  23. mgm/lib/python3.10/site-packages/fsspec/implementations/__pycache__/dirfs.cpython-310.pyc +0 -0
  24. mgm/lib/python3.10/site-packages/fsspec/implementations/__pycache__/ftp.cpython-310.pyc +0 -0
  25. mgm/lib/python3.10/site-packages/fsspec/implementations/__pycache__/git.cpython-310.pyc +0 -0
  26. mgm/lib/python3.10/site-packages/fsspec/implementations/__pycache__/http.cpython-310.pyc +0 -0
  27. mgm/lib/python3.10/site-packages/fsspec/implementations/__pycache__/jupyter.cpython-310.pyc +0 -0
  28. mgm/lib/python3.10/site-packages/fsspec/implementations/__pycache__/local.cpython-310.pyc +0 -0
  29. mgm/lib/python3.10/site-packages/fsspec/implementations/__pycache__/reference.cpython-310.pyc +0 -0
  30. mgm/lib/python3.10/site-packages/fsspec/implementations/__pycache__/sftp.cpython-310.pyc +0 -0
  31. mgm/lib/python3.10/site-packages/fsspec/implementations/__pycache__/webhdfs.cpython-310.pyc +0 -0
  32. mgm/lib/python3.10/site-packages/fsspec/implementations/__pycache__/zip.cpython-310.pyc +0 -0
  33. mgm/lib/python3.10/site-packages/fsspec/implementations/arrow.py +304 -0
  34. mgm/lib/python3.10/site-packages/fsspec/implementations/asyn_wrapper.py +98 -0
  35. mgm/lib/python3.10/site-packages/fsspec/implementations/cache_mapper.py +75 -0
  36. mgm/lib/python3.10/site-packages/fsspec/implementations/cache_metadata.py +232 -0
  37. mgm/lib/python3.10/site-packages/fsspec/implementations/cached.py +929 -0
  38. mgm/lib/python3.10/site-packages/fsspec/implementations/dask.py +152 -0
  39. mgm/lib/python3.10/site-packages/fsspec/implementations/data.py +58 -0
  40. mgm/lib/python3.10/site-packages/fsspec/implementations/dbfs.py +467 -0
  41. mgm/lib/python3.10/site-packages/fsspec/implementations/dirfs.py +384 -0
  42. mgm/lib/python3.10/site-packages/fsspec/implementations/ftp.py +395 -0
  43. mgm/lib/python3.10/site-packages/fsspec/implementations/git.py +115 -0
  44. mgm/lib/python3.10/site-packages/fsspec/implementations/github.py +239 -0
  45. mgm/lib/python3.10/site-packages/fsspec/implementations/http.py +856 -0
  46. mgm/lib/python3.10/site-packages/fsspec/implementations/jupyter.py +124 -0
  47. mgm/lib/python3.10/site-packages/fsspec/implementations/libarchive.py +213 -0
  48. mgm/lib/python3.10/site-packages/fsspec/implementations/local.py +476 -0
  49. mgm/lib/python3.10/site-packages/fsspec/implementations/memory.py +312 -0
  50. mgm/lib/python3.10/site-packages/fsspec/implementations/reference.py +1216 -0
mgm/lib/python3.10/site-packages/cmake/data/doc/cmake-3.31/Copyright.txt ADDED
@@ -0,0 +1,136 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ CMake - Cross Platform Makefile Generator
2
+ Copyright 2000-2024 Kitware, Inc. and Contributors
3
+ All rights reserved.
4
+
5
+ Redistribution and use in source and binary forms, with or without
6
+ modification, are permitted provided that the following conditions
7
+ are met:
8
+
9
+ * Redistributions of source code must retain the above copyright
10
+ notice, this list of conditions and the following disclaimer.
11
+
12
+ * Redistributions in binary form must reproduce the above copyright
13
+ notice, this list of conditions and the following disclaimer in the
14
+ documentation and/or other materials provided with the distribution.
15
+
16
+ * Neither the name of Kitware, Inc. nor the names of Contributors
17
+ may be used to endorse or promote products derived from this
18
+ software without specific prior written permission.
19
+
20
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31
+
32
+ ------------------------------------------------------------------------------
33
+
34
+ The following individuals and institutions are among the Contributors:
35
+
36
+ * Aaron C. Meadows <cmake@shadowguarddev.com>
37
+ * Adriaan de Groot <groot@kde.org>
38
+ * Aleksey Avdeev <solo@altlinux.ru>
39
+ * Alexander Neundorf <neundorf@kde.org>
40
+ * Alexander Smorkalov <alexander.smorkalov@itseez.com>
41
+ * Alexey Sokolov <sokolov@google.com>
42
+ * Alex Merry <alex.merry@kde.org>
43
+ * Alex Turbov <i.zaufi@gmail.com>
44
+ * Andreas Pakulat <apaku@gmx.de>
45
+ * Andreas Schneider <asn@cryptomilk.org>
46
+ * André Rigland Brodtkorb <Andre.Brodtkorb@ifi.uio.no>
47
+ * Axel Huebl, Helmholtz-Zentrum Dresden - Rossendorf
48
+ * Benjamin Eikel
49
+ * Bjoern Ricks <bjoern.ricks@gmail.com>
50
+ * Brad Hards <bradh@kde.org>
51
+ * Christopher Harvey
52
+ * Christoph Grüninger <foss@grueninger.de>
53
+ * Clement Creusot <creusot@cs.york.ac.uk>
54
+ * Daniel Blezek <blezek@gmail.com>
55
+ * Daniel Pfeifer <daniel@pfeifer-mail.de>
56
+ * Dawid Wróbel <me@dawidwrobel.com>
57
+ * Enrico Scholz <enrico.scholz@informatik.tu-chemnitz.de>
58
+ * Eran Ifrah <eran.ifrah@gmail.com>
59
+ * Esben Mose Hansen, Ange Optimization ApS
60
+ * Geoffrey Viola <geoffrey.viola@asirobots.com>
61
+ * Google Inc
62
+ * Gregor Jasny
63
+ * Helio Chissini de Castro <helio@kde.org>
64
+ * Ilya Lavrenov <ilya.lavrenov@itseez.com>
65
+ * Insight Software Consortium <insightsoftwareconsortium.org>
66
+ * Intel Corporation <www.intel.com>
67
+ * Jan Woetzel
68
+ * Jordan Williams <jordan@jwillikers.com>
69
+ * Julien Schueller
70
+ * Kelly Thompson <kgt@lanl.gov>
71
+ * Konstantin Podsvirov <konstantin@podsvirov.pro>
72
+ * Laurent Montel <montel@kde.org>
73
+ * Mario Bensi <mbensi@ipsquad.net>
74
+ * Martin Gräßlin <mgraesslin@kde.org>
75
+ * Mathieu Malaterre <mathieu.malaterre@gmail.com>
76
+ * Matthaeus G. Chajdas
77
+ * Matthias Kretz <kretz@kde.org>
78
+ * Matthias Maennich <matthias@maennich.net>
79
+ * Michael Hirsch, Ph.D. <www.scivision.co>
80
+ * Michael Stürmer
81
+ * Miguel A. Figueroa-Villanueva
82
+ * Mike Durso <rbprogrammer@gmail.com>
83
+ * Mike Jackson
84
+ * Mike McQuaid <mike@mikemcquaid.com>
85
+ * Nicolas Bock <nicolasbock@gmail.com>
86
+ * Nicolas Despres <nicolas.despres@gmail.com>
87
+ * Nikita Krupen'ko <krnekit@gmail.com>
88
+ * NVIDIA Corporation <www.nvidia.com>
89
+ * OpenGamma Ltd. <opengamma.com>
90
+ * Patrick Stotko <stotko@cs.uni-bonn.de>
91
+ * Per Øyvind Karlsen <peroyvind@mandriva.org>
92
+ * Peter Collingbourne <peter@pcc.me.uk>
93
+ * Petr Gotthard <gotthard@honeywell.com>
94
+ * Philip Lowman <philip@yhbt.com>
95
+ * Philippe Proulx <pproulx@efficios.com>
96
+ * Raffi Enficiaud, Max Planck Society
97
+ * Raumfeld <raumfeld.com>
98
+ * Roger Leigh <rleigh@codelibre.net>
99
+ * Rolf Eike Beer <eike@sf-mail.de>
100
+ * Roman Donchenko <roman.donchenko@itseez.com>
101
+ * Roman Kharitonov <roman.kharitonov@itseez.com>
102
+ * Ruslan Baratov
103
+ * Sebastian Holtermann <sebholt@xwmw.org>
104
+ * Stephen Kelly <steveire@gmail.com>
105
+ * Sylvain Joubert <joubert.sy@gmail.com>
106
+ * The Qt Company Ltd.
107
+ * Thomas Sondergaard <ts@medical-insight.com>
108
+ * Tobias Hunger <tobias.hunger@qt.io>
109
+ * Todd Gamblin <tgamblin@llnl.gov>
110
+ * Tristan Carel
111
+ * University of Dundee
112
+ * Vadim Zhukov
113
+ * Will Dicharry <wdicharry@stellarscience.com>
114
+
115
+ See version control history for details of individual contributions.
116
+
117
+ The above copyright and license notice applies to distributions of
118
+ CMake in source and binary form. Third-party software packages supplied
119
+ with CMake under compatible licenses provide their own copyright notices
120
+ documented in corresponding subdirectories or source files.
121
+
122
+ ------------------------------------------------------------------------------
123
+
124
+ CMake was initially developed by Kitware with the following sponsorship:
125
+
126
+ * National Library of Medicine at the National Institutes of Health
127
+ as part of the Insight Segmentation and Registration Toolkit (ITK).
128
+
129
+ * US National Labs (Los Alamos, Livermore, Sandia) ASC Parallel
130
+ Visualization Initiative.
131
+
132
+ * National Alliance for Medical Image Computing (NAMIC) is funded by the
133
+ National Institutes of Health through the NIH Roadmap for Medical Research,
134
+ Grant U54 EB005149.
135
+
136
+ * Kitware, Inc.
mgm/lib/python3.10/site-packages/cmake/data/doc/cmake-3.31/cmcppdap/NOTICE ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ 'cppdap' is a C++11 library implementation of the Debug Adapter Protocol.
2
+ Version as of 2023-01-06
3
+ Copyright Google LLC
4
+
5
+ This product includes software developed at Google.
mgm/lib/python3.10/site-packages/cmake/data/doc/cmake-3.31/cmcurl/COPYING ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ COPYRIGHT AND PERMISSION NOTICE
2
+
3
+ Copyright (c) 1996 - 2024, Daniel Stenberg, <daniel@haxx.se>, and many
4
+ contributors, see the THANKS file.
5
+
6
+ All rights reserved.
7
+
8
+ Permission to use, copy, modify, and distribute this software for any purpose
9
+ with or without fee is hereby granted, provided that the above copyright
10
+ notice and this permission notice appear in all copies.
11
+
12
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN
15
+ NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
16
+ DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
17
+ OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
18
+ OR OTHER DEALINGS IN THE SOFTWARE.
19
+
20
+ Except as contained in this notice, the name of a copyright holder shall not
21
+ be used in advertising or otherwise to promote the sale, use or other dealings
22
+ in this Software without prior written authorization of the copyright holder.
mgm/lib/python3.10/site-packages/cmake/data/doc/cmake-3.31/cmlibarchive/COPYING ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ The libarchive distribution as a whole is Copyright by Tim Kientzle
2
+ and is subject to the copyright notice reproduced at the bottom of
3
+ this file.
4
+
5
+ Each individual file in this distribution should have a clear
6
+ copyright/licensing statement at the beginning of the file. If any do
7
+ not, please let me know and I will rectify it. The following is
8
+ intended to summarize the copyright status of the individual files;
9
+ the actual statements in the files are controlling.
10
+
11
+ * Except as listed below, all C sources (including .c and .h files)
12
+ and documentation files are subject to the copyright notice reproduced
13
+ at the bottom of this file.
14
+
15
+ * The following source files are also subject in whole or in part to
16
+ a 3-clause UC Regents copyright; please read the individual source
17
+ files for details:
18
+ libarchive/archive_read_support_filter_compress.c
19
+ libarchive/archive_write_add_filter_compress.c
20
+ libarchive/mtree.5
21
+
22
+ * The following source files are in the public domain:
23
+ libarchive/archive_getdate.c
24
+
25
+ * The following source files are triple-licensed with the ability to choose
26
+ from CC0 1.0 Universal, OpenSSL or Apache 2.0 licenses:
27
+ libarchive/archive_blake2.h
28
+ libarchive/archive_blake2_impl.h
29
+ libarchive/archive_blake2s_ref.c
30
+ libarchive/archive_blake2sp_ref.c
31
+
32
+ * The build files---including Makefiles, configure scripts,
33
+ and auxiliary scripts used as part of the compile process---have
34
+ widely varying licensing terms. Please check individual files before
35
+ distributing them to see if those restrictions apply to you.
36
+
37
+ I intend for all new source code to use the license below and hope over
38
+ time to replace code with other licenses with new implementations that
39
+ do use the license below. The varying licensing of the build scripts
40
+ seems to be an unavoidable mess.
41
+
42
+
43
+ Copyright (c) 2003-2018 <author(s)>
44
+ All rights reserved.
45
+
46
+ Redistribution and use in source and binary forms, with or without
47
+ modification, are permitted provided that the following conditions
48
+ are met:
49
+ 1. Redistributions of source code must retain the above copyright
50
+ notice, this list of conditions and the following disclaimer
51
+ in this position and unchanged.
52
+ 2. Redistributions in binary form must reproduce the above copyright
53
+ notice, this list of conditions and the following disclaimer in the
54
+ documentation and/or other materials provided with the distribution.
55
+
56
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR(S) ``AS IS'' AND ANY EXPRESS OR
57
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
58
+ OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
59
+ IN NO EVENT SHALL THE AUTHOR(S) BE LIABLE FOR ANY DIRECT, INDIRECT,
60
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
61
+ NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
62
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
63
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
64
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
65
+ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
mgm/lib/python3.10/site-packages/cmake/data/doc/cmake-3.31/cmsys/Copyright.txt ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ KWSys - Kitware System Library
2
+ Copyright 2000-2016 Kitware, Inc. and Contributors
3
+ All rights reserved.
4
+
5
+ Redistribution and use in source and binary forms, with or without
6
+ modification, are permitted provided that the following conditions
7
+ are met:
8
+
9
+ * Redistributions of source code must retain the above copyright
10
+ notice, this list of conditions and the following disclaimer.
11
+
12
+ * Redistributions in binary form must reproduce the above copyright
13
+ notice, this list of conditions and the following disclaimer in the
14
+ documentation and/or other materials provided with the distribution.
15
+
16
+ * Neither the name of Kitware, Inc. nor the names of Contributors
17
+ may be used to endorse or promote products derived from this
18
+ software without specific prior written permission.
19
+
20
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31
+
32
+ ------------------------------------------------------------------------------
33
+
34
+ The following individuals and institutions are among the Contributors:
35
+
36
+ * Insight Software Consortium <insightsoftwareconsortium.org>
37
+
38
+ See version control history for details of individual contributions.
mgm/lib/python3.10/site-packages/cmake/data/doc/cmake-3.31/cmzlib/Copyright.txt ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 'zlib' general purpose compression library
2
+ version 1.3.1, January 22nd, 2024
3
+
4
+ Copyright (C) 1995-2024 Jean-loup Gailly and Mark Adler
5
+
6
+ This software is provided 'as-is', without any express or implied
7
+ warranty. In no event will the authors be held liable for any damages
8
+ arising from the use of this software.
9
+
10
+ Permission is granted to anyone to use this software for any purpose,
11
+ including commercial applications, and to alter it and redistribute it
12
+ freely, subject to the following restrictions:
13
+
14
+ 1. The origin of this software must not be misrepresented; you must not
15
+ claim that you wrote the original software. If you use this software
16
+ in a product, an acknowledgment in the product documentation would be
17
+ appreciated but is not required.
18
+ 2. Altered source versions must be plainly marked as such, and must not be
19
+ misrepresented as being the original software.
20
+ 3. This notice may not be removed or altered from any source distribution.
21
+
22
+ Jean-loup Gailly Mark Adler
23
+ jloup@gzip.org madler@alumni.caltech.edu
mgm/lib/python3.10/site-packages/fsspec/__init__.py ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from importlib.metadata import entry_points
2
+
3
+ from . import caching
4
+ from ._version import __version__ # noqa: F401
5
+ from .callbacks import Callback
6
+ from .compression import available_compressions
7
+ from .core import get_fs_token_paths, open, open_files, open_local, url_to_fs
8
+ from .exceptions import FSTimeoutError
9
+ from .mapping import FSMap, get_mapper
10
+ from .registry import (
11
+ available_protocols,
12
+ filesystem,
13
+ get_filesystem_class,
14
+ register_implementation,
15
+ registry,
16
+ )
17
+ from .spec import AbstractFileSystem
18
+
19
+ __all__ = [
20
+ "AbstractFileSystem",
21
+ "FSTimeoutError",
22
+ "FSMap",
23
+ "filesystem",
24
+ "register_implementation",
25
+ "get_filesystem_class",
26
+ "get_fs_token_paths",
27
+ "get_mapper",
28
+ "open",
29
+ "open_files",
30
+ "open_local",
31
+ "registry",
32
+ "caching",
33
+ "Callback",
34
+ "available_protocols",
35
+ "available_compressions",
36
+ "url_to_fs",
37
+ ]
38
+
39
+
40
+ def process_entries():
41
+ if entry_points is not None:
42
+ try:
43
+ eps = entry_points()
44
+ except TypeError:
45
+ pass # importlib-metadata < 0.8
46
+ else:
47
+ if hasattr(eps, "select"): # Python 3.10+ / importlib_metadata >= 3.9.0
48
+ specs = eps.select(group="fsspec.specs")
49
+ else:
50
+ specs = eps.get("fsspec.specs", [])
51
+ registered_names = {}
52
+ for spec in specs:
53
+ err_msg = f"Unable to load filesystem from {spec}"
54
+ name = spec.name
55
+ if name in registered_names:
56
+ continue
57
+ registered_names[name] = True
58
+ register_implementation(
59
+ name,
60
+ spec.value.replace(":", "."),
61
+ errtxt=err_msg,
62
+ # We take our implementations as the ones to overload with if
63
+ # for some reason we encounter some, may be the same, already
64
+ # registered
65
+ clobber=True,
66
+ )
67
+
68
+
69
+ process_entries()
mgm/lib/python3.10/site-packages/fsspec/__pycache__/_version.cpython-310.pyc ADDED
Binary file (482 Bytes). View file
 
mgm/lib/python3.10/site-packages/fsspec/__pycache__/gui.cpython-310.pyc ADDED
Binary file (14.6 kB). View file
 
mgm/lib/python3.10/site-packages/fsspec/__pycache__/registry.cpython-310.pyc ADDED
Binary file (9.13 kB). View file
 
mgm/lib/python3.10/site-packages/fsspec/_version.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # file generated by setuptools_scm
2
+ # don't change, don't track in version control
3
+ TYPE_CHECKING = False
4
+ if TYPE_CHECKING:
5
+ from typing import Tuple, Union
6
+ VERSION_TUPLE = Tuple[Union[int, str], ...]
7
+ else:
8
+ VERSION_TUPLE = object
9
+
10
+ version: str
11
+ __version__: str
12
+ __version_tuple__: VERSION_TUPLE
13
+ version_tuple: VERSION_TUPLE
14
+
15
+ __version__ = version = '2024.12.0'
16
+ __version_tuple__ = version_tuple = (2024, 12, 0)
mgm/lib/python3.10/site-packages/fsspec/caching.py ADDED
@@ -0,0 +1,966 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import collections
4
+ import functools
5
+ import logging
6
+ import math
7
+ import os
8
+ import threading
9
+ import warnings
10
+ from concurrent.futures import Future, ThreadPoolExecutor
11
+ from itertools import groupby
12
+ from operator import itemgetter
13
+ from typing import (
14
+ TYPE_CHECKING,
15
+ Any,
16
+ Callable,
17
+ ClassVar,
18
+ Generic,
19
+ NamedTuple,
20
+ Optional,
21
+ OrderedDict,
22
+ TypeVar,
23
+ )
24
+
25
+ if TYPE_CHECKING:
26
+ import mmap
27
+
28
+ from typing_extensions import ParamSpec
29
+
30
+ P = ParamSpec("P")
31
+ else:
32
+ P = TypeVar("P")
33
+
34
+ T = TypeVar("T")
35
+
36
+
37
+ logger = logging.getLogger("fsspec")
38
+
39
+ Fetcher = Callable[[int, int], bytes] # Maps (start, end) to bytes
40
+
41
+
42
+ class BaseCache:
43
+ """Pass-though cache: doesn't keep anything, calls every time
44
+
45
+ Acts as base class for other cachers
46
+
47
+ Parameters
48
+ ----------
49
+ blocksize: int
50
+ How far to read ahead in numbers of bytes
51
+ fetcher: func
52
+ Function of the form f(start, end) which gets bytes from remote as
53
+ specified
54
+ size: int
55
+ How big this file is
56
+ """
57
+
58
+ name: ClassVar[str] = "none"
59
+
60
+ def __init__(self, blocksize: int, fetcher: Fetcher, size: int) -> None:
61
+ self.blocksize = blocksize
62
+ self.nblocks = 0
63
+ self.fetcher = fetcher
64
+ self.size = size
65
+ self.hit_count = 0
66
+ self.miss_count = 0
67
+ # the bytes that we actually requested
68
+ self.total_requested_bytes = 0
69
+
70
+ def _fetch(self, start: int | None, stop: int | None) -> bytes:
71
+ if start is None:
72
+ start = 0
73
+ if stop is None:
74
+ stop = self.size
75
+ if start >= self.size or start >= stop:
76
+ return b""
77
+ return self.fetcher(start, stop)
78
+
79
+ def _reset_stats(self) -> None:
80
+ """Reset hit and miss counts for a more ganular report e.g. by file."""
81
+ self.hit_count = 0
82
+ self.miss_count = 0
83
+ self.total_requested_bytes = 0
84
+
85
+ def _log_stats(self) -> str:
86
+ """Return a formatted string of the cache statistics."""
87
+ if self.hit_count == 0 and self.miss_count == 0:
88
+ # a cache that does nothing, this is for logs only
89
+ return ""
90
+ return f" , {self.name}: {self.hit_count} hits, {self.miss_count} misses, {self.total_requested_bytes} total requested bytes"
91
+
92
+ def __repr__(self) -> str:
93
+ # TODO: use rich for better formatting
94
+ return f"""
95
+ <{self.__class__.__name__}:
96
+ block size : {self.blocksize}
97
+ block count : {self.nblocks}
98
+ file size : {self.size}
99
+ cache hits : {self.hit_count}
100
+ cache misses: {self.miss_count}
101
+ total requested bytes: {self.total_requested_bytes}>
102
+ """
103
+
104
+
105
+ class MMapCache(BaseCache):
106
+ """memory-mapped sparse file cache
107
+
108
+ Opens temporary file, which is filled blocks-wise when data is requested.
109
+ Ensure there is enough disc space in the temporary location.
110
+
111
+ This cache method might only work on posix
112
+ """
113
+
114
+ name = "mmap"
115
+
116
+ def __init__(
117
+ self,
118
+ blocksize: int,
119
+ fetcher: Fetcher,
120
+ size: int,
121
+ location: str | None = None,
122
+ blocks: set[int] | None = None,
123
+ ) -> None:
124
+ super().__init__(blocksize, fetcher, size)
125
+ self.blocks = set() if blocks is None else blocks
126
+ self.location = location
127
+ self.cache = self._makefile()
128
+
129
+ def _makefile(self) -> mmap.mmap | bytearray:
130
+ import mmap
131
+ import tempfile
132
+
133
+ if self.size == 0:
134
+ return bytearray()
135
+
136
+ # posix version
137
+ if self.location is None or not os.path.exists(self.location):
138
+ if self.location is None:
139
+ fd = tempfile.TemporaryFile()
140
+ self.blocks = set()
141
+ else:
142
+ fd = open(self.location, "wb+")
143
+ fd.seek(self.size - 1)
144
+ fd.write(b"1")
145
+ fd.flush()
146
+ else:
147
+ fd = open(self.location, "r+b")
148
+
149
+ return mmap.mmap(fd.fileno(), self.size)
150
+
151
+ def _fetch(self, start: int | None, end: int | None) -> bytes:
152
+ logger.debug(f"MMap cache fetching {start}-{end}")
153
+ if start is None:
154
+ start = 0
155
+ if end is None:
156
+ end = self.size
157
+ if start >= self.size or start >= end:
158
+ return b""
159
+ start_block = start // self.blocksize
160
+ end_block = end // self.blocksize
161
+ block_range = range(start_block, end_block + 1)
162
+ # Determine which blocks need to be fetched. This sequence is sorted by construction.
163
+ need = (i for i in block_range if i not in self.blocks)
164
+ # Count the number of blocks already cached
165
+ self.hit_count += sum(1 for i in block_range if i in self.blocks)
166
+
167
+ # Consolidate needed blocks.
168
+ # Algorithm adapted from Python 2.x itertools documentation.
169
+ # We are grouping an enumerated sequence of blocks. By comparing when the difference
170
+ # between an ascending range (provided by enumerate) and the needed block numbers
171
+ # we can detect when the block number skips values. The key computes this difference.
172
+ # Whenever the difference changes, we know that we have previously cached block(s),
173
+ # and a new group is started. In other words, this algorithm neatly groups
174
+ # runs of consecutive block numbers so they can be fetched together.
175
+ for _, _blocks in groupby(enumerate(need), key=lambda x: x[0] - x[1]):
176
+ # Extract the blocks from the enumerated sequence
177
+ _blocks = tuple(map(itemgetter(1), _blocks))
178
+ # Compute start of first block
179
+ sstart = _blocks[0] * self.blocksize
180
+ # Compute the end of the last block. Last block may not be full size.
181
+ send = min(_blocks[-1] * self.blocksize + self.blocksize, self.size)
182
+
183
+ # Fetch bytes (could be multiple consecutive blocks)
184
+ self.total_requested_bytes += send - sstart
185
+ logger.debug(
186
+ f"MMap get blocks {_blocks[0]}-{_blocks[-1]} ({sstart}-{send})"
187
+ )
188
+ self.cache[sstart:send] = self.fetcher(sstart, send)
189
+
190
+ # Update set of cached blocks
191
+ self.blocks.update(_blocks)
192
+ # Update cache statistics with number of blocks we had to cache
193
+ self.miss_count += len(_blocks)
194
+
195
+ return self.cache[start:end]
196
+
197
+ def __getstate__(self) -> dict[str, Any]:
198
+ state = self.__dict__.copy()
199
+ # Remove the unpicklable entries.
200
+ del state["cache"]
201
+ return state
202
+
203
+ def __setstate__(self, state: dict[str, Any]) -> None:
204
+ # Restore instance attributes
205
+ self.__dict__.update(state)
206
+ self.cache = self._makefile()
207
+
208
+
209
class ReadAheadCache(BaseCache):
    """Cache which reads only when we get beyond a block of data

    This is a much simpler version of BytesCache, and does not attempt to
    fill holes in the cache or keep fragments alive. It is best suited to
    many small reads in a sequential order (e.g., reading lines from a file).
    """

    name = "readahead"

    def __init__(self, blocksize: int, fetcher: Fetcher, size: int) -> None:
        super().__init__(blocksize, fetcher, size)
        # Single contiguous buffer covering file bytes [self.start, self.end)
        self.cache = b""
        self.start = 0
        self.end = 0

    def _fetch(self, start: int | None, end: int | None) -> bytes:
        """Return bytes [start, end); ``None`` means start/end of file."""
        if start is None:
            start = 0
        if end is None or end > self.size:
            end = self.size
        if start >= self.size or start >= end:
            # nothing to read (at/after EOF, or empty/inverted range)
            return b""
        # l = number of bytes still to be produced from a fresh fetch
        l = end - start
        if start >= self.start and end <= self.end:
            # cache hit: the whole request lies inside the current buffer
            self.hit_count += 1
            return self.cache[start - self.start : end - self.start]
        elif self.start <= start < self.end:
            # partial hit: serve the prefix from cache, fetch the rest below
            self.miss_count += 1
            part = self.cache[start - self.start :]
            l -= len(part)
            start = self.end
        else:
            # miss: buffer does not overlap the request at all
            self.miss_count += 1
            part = b""
        # Read ahead by one extra blocksize (capped at EOF)
        end = min(self.size, end + self.blocksize)
        self.total_requested_bytes += end - start
        self.cache = self.fetcher(start, end)  # new block replaces old
        self.start = start
        self.end = self.start + len(self.cache)
        return part + self.cache[:l]
253
+
254
+
255
class FirstChunkCache(BaseCache):
    """Caches the first block of a file only

    This may be useful for file types where the metadata is stored in the header,
    but is randomly accessed.
    """

    name = "first"

    def __init__(self, blocksize: int, fetcher: Fetcher, size: int) -> None:
        if blocksize > size:
            # this will buffer the whole thing
            blocksize = size
        super().__init__(blocksize, fetcher, size)
        # Lazily-populated copy of the first ``blocksize`` bytes of the file
        self.cache: bytes | None = None

    def _fetch(self, start: int | None, end: int | None) -> bytes:
        """Return bytes [start, end), serving the file head from cache.

        ``start=None`` means start of file and ``end=None`` means end of
        file, matching the other cache implementations in this module.
        """
        start = start or 0
        if start > self.size:
            logger.debug("FirstChunkCache: requested start > file size")
            return b""

        # Fix: treat end=None as end-of-file. Previously this fell straight
        # into min(end, self.size) and raised TypeError, since every other
        # cache here accepts end=None.
        end = self.size if end is None else min(end, self.size)

        if start < self.blocksize:
            # Request overlaps the cached head of the file
            if self.cache is None:
                self.miss_count += 1
                if end > self.blocksize:
                    # Request also runs past the first block: fetch the whole
                    # span in one call and keep only the head for later reads.
                    self.total_requested_bytes += end
                    data = self.fetcher(0, end)
                    self.cache = data[: self.blocksize]
                    return data[start:]
                self.cache = self.fetcher(0, self.blocksize)
                self.total_requested_bytes += self.blocksize
            part = self.cache[start:end]
            if end > self.blocksize:
                # The tail beyond the cached head is fetched but never stored
                self.total_requested_bytes += end - self.blocksize
                part += self.fetcher(self.blocksize, end)
            self.hit_count += 1
            return part
        else:
            # Entirely outside the cached head: pass straight through
            self.miss_count += 1
            self.total_requested_bytes += end - start
            return self.fetcher(start, end)
299
+
300
+
301
class BlockCache(BaseCache):
    """
    Cache holding memory as a set of blocks.

    Requests are only ever made ``blocksize`` at a time, and are
    stored in an LRU cache. The least recently accessed block is
    discarded when more than ``maxblocks`` are stored.

    Parameters
    ----------
    blocksize : int
        The number of bytes to store in each block.
        Requests are only ever made for ``blocksize``, so this
        should balance the overhead of making a request against
        the granularity of the blocks.
    fetcher : Callable
    size : int
        The total size of the file being cached.
    maxblocks : int
        The maximum number of blocks to cache for. The maximum memory
        use for this cache is then ``blocksize * maxblocks``.
    """

    name = "blockcache"

    def __init__(
        self, blocksize: int, fetcher: Fetcher, size: int, maxblocks: int = 32
    ) -> None:
        super().__init__(blocksize, fetcher, size)
        self.nblocks = math.ceil(size / blocksize)
        self.maxblocks = maxblocks
        self._fetch_block_cached = functools.lru_cache(maxblocks)(self._fetch_block)

    def cache_info(self):
        """
        The statistics on the block cache.

        Returns
        -------
        NamedTuple
            Returned directly from the LRU Cache used internally.
        """
        return self._fetch_block_cached.cache_info()

    def __getstate__(self) -> dict[str, Any]:
        # Fix: copy before deleting. ``state = self.__dict__`` aliased the
        # live instance dict, so pickling permanently stripped the LRU
        # wrapper from the instance being pickled (MMapCache.__getstate__
        # in this module copies for exactly this reason).
        state = self.__dict__.copy()
        del state["_fetch_block_cached"]
        return state

    def __setstate__(self, state: dict[str, Any]) -> None:
        self.__dict__.update(state)
        # The lru_cache wrapper is not picklable; rebuild it on restore.
        self._fetch_block_cached = functools.lru_cache(state["maxblocks"])(
            self._fetch_block
        )

    def _fetch(self, start: int | None, end: int | None) -> bytes:
        """Return bytes [start, end); ``None`` means start/end of file."""
        if start is None:
            start = 0
        if end is None:
            end = self.size
        if start >= self.size or start >= end:
            return b""

        # byte position -> block numbers
        start_block_number = start // self.blocksize
        end_block_number = end // self.blocksize

        # these are cached, so safe to do multiple calls for the same start and end.
        for block_number in range(start_block_number, end_block_number + 1):
            self._fetch_block_cached(block_number)

        return self._read_cache(
            start,
            end,
            start_block_number=start_block_number,
            end_block_number=end_block_number,
        )

    def _fetch_block(self, block_number: int) -> bytes:
        """
        Fetch the block of data for `block_number`.

        Raises
        ------
        ValueError
            If ``block_number`` is beyond the end of the file.
        """
        if block_number > self.nblocks:
            raise ValueError(
                f"'block_number={block_number}' is greater than "
                f"the number of blocks ({self.nblocks})"
            )

        start = block_number * self.blocksize
        end = start + self.blocksize
        self.total_requested_bytes += end - start
        self.miss_count += 1
        logger.info("BlockCache fetching block %d", block_number)
        block_contents = super()._fetch(start, end)
        return block_contents

    def _read_cache(
        self, start: int, end: int, start_block_number: int, end_block_number: int
    ) -> bytes:
        """
        Read from our block cache.

        Parameters
        ----------
        start, end : int
            The start and end byte positions.
        start_block_number, end_block_number : int
            The start and end block numbers.
        """
        start_pos = start % self.blocksize
        end_pos = end % self.blocksize

        self.hit_count += 1
        if start_block_number == end_block_number:
            # Entire request lies within a single block
            block: bytes = self._fetch_block_cached(start_block_number)
            return block[start_pos:end_pos]

        else:
            # read from the initial
            out = [self._fetch_block_cached(start_block_number)[start_pos:]]

            # intermediate blocks
            # Note: it'd be nice to combine these into one big request. However
            # that doesn't play nicely with our LRU cache.
            out.extend(
                map(
                    self._fetch_block_cached,
                    range(start_block_number + 1, end_block_number),
                )
            )

            # final block
            out.append(self._fetch_block_cached(end_block_number)[:end_pos])

            return b"".join(out)
436
+
437
+
438
class BytesCache(BaseCache):
    """Cache which holds data in a in-memory bytes object

    Implements read-ahead by the block size, for semi-random reads progressing
    through the file.

    Parameters
    ----------
    trim: bool
        As we read more data, whether to discard the start of the buffer when
        we are more than a blocksize ahead of it.
    """

    name: ClassVar[str] = "bytes"

    def __init__(
        self, blocksize: int, fetcher: Fetcher, size: int, trim: bool = True
    ) -> None:
        super().__init__(blocksize, fetcher, size)
        # Single contiguous buffer; start/end stay None until first fetch
        self.cache = b""
        self.start: int | None = None
        self.end: int | None = None
        self.trim = trim

    def _fetch(self, start: int | None, end: int | None) -> bytes:
        # TODO: only set start/end after fetch, in case it fails?
        # is this where retry logic might go?
        if start is None:
            start = 0
        if end is None:
            end = self.size
        if start >= self.size or start >= end:
            return b""
        if (
            self.start is not None
            and start >= self.start
            and self.end is not None
            and end < self.end
        ):
            # cache hit: we have all the required data
            offset = start - self.start
            self.hit_count += 1
            return self.cache[offset : offset + end - start]

        if self.blocksize:
            # read ahead by one blocksize beyond the request (capped at EOF)
            bend = min(self.size, end + self.blocksize)
        else:
            bend = end

        if bend == start or start > self.size:
            return b""

        if (self.start is None or start < self.start) and (
            self.end is None or end > self.end
        ):
            # First read, or extending both before and after
            self.total_requested_bytes += bend - start
            self.miss_count += 1
            self.cache = self.fetcher(start, bend)
            self.start = start
        else:
            assert self.start is not None
            assert self.end is not None
            self.miss_count += 1

            if start < self.start:
                # extending backwards
                if self.end is None or self.end - end > self.blocksize:
                    # gap would be too large to keep: replace buffer wholesale
                    self.total_requested_bytes += bend - start
                    self.cache = self.fetcher(start, bend)
                    self.start = start
                else:
                    # prepend the missing prefix to the existing buffer
                    self.total_requested_bytes += self.start - start
                    new = self.fetcher(start, self.start)
                    self.start = start
                    self.cache = new + self.cache
            elif self.end is not None and bend > self.end:
                # extending forwards
                if self.end > self.size:
                    pass
                elif end - self.end > self.blocksize:
                    # gap would be too large to keep: replace buffer wholesale
                    self.total_requested_bytes += bend - start
                    self.cache = self.fetcher(start, bend)
                    self.start = start
                else:
                    # append the missing suffix to the existing buffer
                    self.total_requested_bytes += bend - self.end
                    new = self.fetcher(self.end, bend)
                    self.cache = self.cache + new

        self.end = self.start + len(self.cache)
        offset = start - self.start
        out = self.cache[offset : offset + end - start]
        if self.trim:
            # drop whole blocks from the front once we are well past them
            num = (self.end - self.start) // (self.blocksize + 1)
            if num > 1:
                self.start += self.blocksize * num
                self.cache = self.cache[self.blocksize * num :]
        return out

    def __len__(self) -> int:
        # number of bytes currently held in the buffer
        return len(self.cache)
537
+
538
+
539
class AllBytes(BaseCache):
    """Cache entire contents of the file

    The whole file is read once (unless ``data`` is supplied pre-fetched),
    after which every request is served from memory.
    """

    name: ClassVar[str] = "all"

    def __init__(
        self,
        blocksize: int | None = None,
        fetcher: Fetcher | None = None,
        size: int | None = None,
        data: bytes | None = None,
    ) -> None:
        super().__init__(blocksize, fetcher, size)  # type: ignore[arg-type]
        if data is None:
            # One-shot read of the entire file; account it as a single miss.
            self.miss_count += 1
            self.total_requested_bytes += self.size
            data = self.fetcher(0, self.size)
        self.data = data

    def _fetch(self, start: int | None, stop: int | None) -> bytes:
        # Everything is resident, so any read is a hit by definition.
        self.hit_count += 1
        contents = self.data
        return contents[start:stop]
561
+
562
+
563
class KnownPartsOfAFile(BaseCache):
    """
    Cache holding known file parts.

    Parameters
    ----------
    blocksize: int
        How far to read ahead in numbers of bytes
    fetcher: func
        Function of the form f(start, end) which gets bytes from remote as
        specified
    size: int
        How big this file is
    data: dict
        A dictionary mapping explicit `(start, stop)` file-offset tuples
        with known bytes.
    strict: bool, default True
        Whether to fetch reads that go beyond a known byte-range boundary.
        If `False`, any read that ends outside a known part will be zero
        padded. Note that zero padding will not be used for reads that
        begin outside a known byte-range.
    """

    name: ClassVar[str] = "parts"

    def __init__(
        self,
        blocksize: int,
        fetcher: Fetcher,
        size: int,
        data: Optional[dict[tuple[int, int], bytes]] = None,
        strict: bool = True,
        **_: Any,
    ):
        super().__init__(blocksize, fetcher, size)
        self.strict = strict

        # simple consolidation of contiguous blocks
        if data:
            old_offsets = sorted(data.keys())
            offsets = [old_offsets[0]]
            blocks = [data.pop(old_offsets[0])]
            for start, stop in old_offsets[1:]:
                start0, stop0 = offsets[-1]
                if start == stop0:
                    # this range is adjacent to the previous one: merge them
                    offsets[-1] = (start0, stop)
                    blocks[-1] += data.pop((start, stop))
                else:
                    offsets.append((start, stop))
                    blocks.append(data.pop((start, stop)))

            self.data = dict(zip(offsets, blocks))
        else:
            self.data = {}

    def _fetch(self, start: int | None, stop: int | None) -> bytes:
        """Return bytes [start, stop); ``None`` means start/end of file."""
        if start is None:
            start = 0
        if stop is None:
            stop = self.size

        out = b""
        for (loc0, loc1), data in self.data.items():
            # If self.strict=False, use zero-padded data
            # for reads beyond the end of a "known" buffer
            if loc0 <= start < loc1:
                off = start - loc0
                out = data[off : off + stop - start]
                if not self.strict or loc0 <= stop <= loc1:
                    # The request is within a known range, or
                    # it begins within a known range, and we
                    # are allowed to pad reads beyond the
                    # buffer with zero
                    out += b"\x00" * (stop - start - len(out))
                    self.hit_count += 1
                    return out
                else:
                    # The request ends outside a known range,
                    # and we are being "strict" about reads
                    # beyond the buffer
                    start = loc1
                    break

        # We only get here if there is a request outside the
        # known parts of the file. In an ideal world, this
        # should never happen
        if self.fetcher is None:
            # We cannot fetch the data, so raise an error
            raise ValueError(f"Read is outside the known file parts: {(start, stop)}. ")
        # We can fetch the data, but should warn the user
        # that this may be slow
        warnings.warn(
            f"Read is outside the known file parts: {(start, stop)}. "
            f"IO/caching performance may be poor!"
        )
        logger.debug(f"KnownPartsOfAFile cache fetching {start}-{stop}")
        self.total_requested_bytes += stop - start
        self.miss_count += 1
        return out + super()._fetch(start, stop)
662
+
663
+
664
class UpdatableLRU(Generic[P, T]):
    """
    Custom implementation of LRU cache that allows updating keys

    Unlike ``functools.lru_cache``, entries can be inserted/updated directly
    via :meth:`add_key`; this is how ``BackgroundBlockCache`` publishes
    blocks fetched in the background. All operations are lock-protected.
    """

    class CacheInfo(NamedTuple):
        hits: int
        misses: int
        maxsize: int
        currsize: int

    def __init__(self, func: Callable[P, T], max_size: int = 128) -> None:
        self._cache: OrderedDict[Any, T] = collections.OrderedDict()
        self._func = func
        self._max_size = max_size
        self._hits = 0
        self._misses = 0
        self._lock = threading.Lock()

    def __call__(self, *args: P.args, **kwargs: P.kwargs) -> T:
        if kwargs:
            raise TypeError(f"Got unexpected keyword argument {kwargs.keys()}")
        with self._lock:
            if args in self._cache:
                # Hit: refresh recency and return the stored value
                self._cache.move_to_end(args)
                self._hits += 1
                return self._cache[args]

        # Miss: compute outside the lock so a slow func does not block others
        value = self._func(*args, **kwargs)

        with self._lock:
            self._misses += 1
            self._store(value, args)

        return value

    def _store(self, value: T, key: Any) -> None:
        # Insert/replace an entry, evicting the least-recently-used one if
        # over capacity. The caller must already hold self._lock.
        self._cache[key] = value
        if len(self._cache) > self._max_size:
            self._cache.popitem(last=False)

    def is_key_cached(self, *args: Any) -> bool:
        """Whether a value is already stored for ``args``."""
        with self._lock:
            return args in self._cache

    def add_key(self, result: T, *args: Any) -> None:
        """Directly insert ``result`` as the cached value for ``args``."""
        with self._lock:
            self._store(result, args)

    def cache_info(self) -> UpdatableLRU.CacheInfo:
        """Hit/miss/size statistics, mirroring ``functools.lru_cache``."""
        with self._lock:
            return self.CacheInfo(
                maxsize=self._max_size,
                currsize=len(self._cache),
                hits=self._hits,
                misses=self._misses,
            )
722
+
723
+
724
class BackgroundBlockCache(BaseCache):
    """
    Cache holding memory as a set of blocks with pre-loading of
    the next block in the background.

    Requests are only ever made ``blocksize`` at a time, and are
    stored in an LRU cache. The least recently accessed block is
    discarded when more than ``maxblocks`` are stored. If the
    next block is not in cache, it is loaded in a separate thread
    in non-blocking way.

    Parameters
    ----------
    blocksize : int
        The number of bytes to store in each block.
        Requests are only ever made for ``blocksize``, so this
        should balance the overhead of making a request against
        the granularity of the blocks.
    fetcher : Callable
    size : int
        The total size of the file being cached.
    maxblocks : int
        The maximum number of blocks to cache for. The maximum memory
        use for this cache is then ``blocksize * maxblocks``.
    """

    name: ClassVar[str] = "background"

    def __init__(
        self, blocksize: int, fetcher: Fetcher, size: int, maxblocks: int = 32
    ) -> None:
        super().__init__(blocksize, fetcher, size)
        self.nblocks = math.ceil(size / blocksize)
        self.maxblocks = maxblocks
        self._fetch_block_cached = UpdatableLRU(self._fetch_block, maxblocks)

        # Single worker that pre-fetches at most one block at a time; the
        # lock guards the (future, block_number) pair below.
        self._thread_executor = ThreadPoolExecutor(max_workers=1)
        self._fetch_future_block_number: int | None = None
        self._fetch_future: Future[bytes] | None = None
        self._fetch_future_lock = threading.Lock()

    def cache_info(self) -> UpdatableLRU.CacheInfo:
        """
        The statistics on the block cache.

        Returns
        -------
        NamedTuple
            Returned directly from the LRU Cache used internally.
        """
        return self._fetch_block_cached.cache_info()

    def __getstate__(self) -> dict[str, Any]:
        # Fix: copy before deleting. ``state = self.__dict__`` aliased the
        # live instance dict, so pickling permanently stripped the LRU
        # wrapper, executor and future state from the instance being
        # pickled (MMapCache.__getstate__ in this module copies first).
        state = self.__dict__.copy()
        del state["_fetch_block_cached"]
        del state["_thread_executor"]
        del state["_fetch_future_block_number"]
        del state["_fetch_future"]
        del state["_fetch_future_lock"]
        return state

    def __setstate__(self, state) -> None:
        self.__dict__.update(state)
        # Rebuild the unpicklable machinery removed by __getstate__.
        self._fetch_block_cached = UpdatableLRU(self._fetch_block, state["maxblocks"])
        self._thread_executor = ThreadPoolExecutor(max_workers=1)
        self._fetch_future_block_number = None
        self._fetch_future = None
        self._fetch_future_lock = threading.Lock()

    def _fetch(self, start: int | None, end: int | None) -> bytes:
        """Return bytes [start, end); ``None`` means start/end of file."""
        if start is None:
            start = 0
        if end is None:
            end = self.size
        if start >= self.size or start >= end:
            return b""

        # byte position -> block numbers
        start_block_number = start // self.blocksize
        end_block_number = end // self.blocksize

        fetch_future_block_number = None
        fetch_future = None
        with self._fetch_future_lock:
            # Background thread is running. Check whether we can or must join it.
            if self._fetch_future is not None:
                assert self._fetch_future_block_number is not None
                if self._fetch_future.done():
                    logger.info("BlockCache joined background fetch without waiting.")
                    self._fetch_block_cached.add_key(
                        self._fetch_future.result(), self._fetch_future_block_number
                    )
                    # Cleanup the fetch variables. Done with fetching the block.
                    self._fetch_future_block_number = None
                    self._fetch_future = None
                else:
                    # Must join if we need the block for the current fetch
                    must_join = bool(
                        start_block_number
                        <= self._fetch_future_block_number
                        <= end_block_number
                    )
                    if must_join:
                        # Copy to the local variables to release lock
                        # before waiting for result
                        fetch_future_block_number = self._fetch_future_block_number
                        fetch_future = self._fetch_future

                        # Cleanup the fetch variables. Have a local copy.
                        self._fetch_future_block_number = None
                        self._fetch_future = None

        # Need to wait for the future for the current read
        if fetch_future is not None:
            logger.info("BlockCache waiting for background fetch.")
            # Wait until result and put it in cache
            self._fetch_block_cached.add_key(
                fetch_future.result(), fetch_future_block_number
            )

        # these are cached, so safe to do multiple calls for the same start and end.
        for block_number in range(start_block_number, end_block_number + 1):
            self._fetch_block_cached(block_number)

        # fetch next block in the background if nothing is running in the background,
        # the block is within file and it is not already cached
        end_block_plus_1 = end_block_number + 1
        with self._fetch_future_lock:
            if (
                self._fetch_future is None
                and end_block_plus_1 <= self.nblocks
                and not self._fetch_block_cached.is_key_cached(end_block_plus_1)
            ):
                self._fetch_future_block_number = end_block_plus_1
                self._fetch_future = self._thread_executor.submit(
                    self._fetch_block, end_block_plus_1, "async"
                )

        return self._read_cache(
            start,
            end,
            start_block_number=start_block_number,
            end_block_number=end_block_number,
        )

    def _fetch_block(self, block_number: int, log_info: str = "sync") -> bytes:
        """
        Fetch the block of data for `block_number`.

        Raises
        ------
        ValueError
            If ``block_number`` is beyond the end of the file.
        """
        if block_number > self.nblocks:
            raise ValueError(
                f"'block_number={block_number}' is greater than "
                f"the number of blocks ({self.nblocks})"
            )

        start = block_number * self.blocksize
        end = start + self.blocksize
        logger.info("BlockCache fetching block (%s) %d", log_info, block_number)
        self.total_requested_bytes += end - start
        self.miss_count += 1
        block_contents = super()._fetch(start, end)
        return block_contents

    def _read_cache(
        self, start: int, end: int, start_block_number: int, end_block_number: int
    ) -> bytes:
        """
        Read from our block cache.

        Parameters
        ----------
        start, end : int
            The start and end byte positions.
        start_block_number, end_block_number : int
            The start and end block numbers.
        """
        start_pos = start % self.blocksize
        end_pos = end % self.blocksize

        # kind of pointless to count this as a hit, but it is
        self.hit_count += 1

        if start_block_number == end_block_number:
            # Entire request lies within a single block
            block = self._fetch_block_cached(start_block_number)
            return block[start_pos:end_pos]

        else:
            # read from the initial
            out = [self._fetch_block_cached(start_block_number)[start_pos:]]

            # intermediate blocks
            # Note: it'd be nice to combine these into one big request. However
            # that doesn't play nicely with our LRU cache.
            out.extend(
                map(
                    self._fetch_block_cached,
                    range(start_block_number + 1, end_block_number),
                )
            )

            # final block
            out.append(self._fetch_block_cached(end_block_number)[:end_pos])

            return b"".join(out)
928
+
929
+
930
# Registry mapping cache-name -> implementation; extended via register_cache.
caches: dict[str | None, type[BaseCache]] = {
    # one custom case
    None: BaseCache,
}
934
+
935
+
936
def register_cache(cls: type[BaseCache], clobber: bool = False) -> None:
    """'Register' cache implementation.

    Makes ``cls`` available in the module-level ``caches`` registry under
    its ``name`` class attribute.

    Parameters
    ----------
    clobber: bool, optional
        If set to True (default is False) - allow to overwrite existing
        entry.

    Raises
    ------
    ValueError
        If the name is already registered and ``clobber`` is False.
    """
    key = cls.name
    if key in caches and not clobber:
        raise ValueError(f"Cache with name {key!r} is already known: {caches[key]}")
    caches[key] = cls
953
+
954
+
955
# Register every built-in cache implementation under its ``name`` attribute.
for c in (
    BaseCache,
    MMapCache,
    BytesCache,
    ReadAheadCache,
    BlockCache,
    FirstChunkCache,
    AllBytes,
    KnownPartsOfAFile,
    BackgroundBlockCache,
):
    register_cache(c)
mgm/lib/python3.10/site-packages/fsspec/callbacks.py ADDED
@@ -0,0 +1,324 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import wraps
2
+
3
+
4
class Callback:
    """
    Base class and interface for callback mechanism

    This class can be used directly for monitoring file transfers by
    providing ``callback=Callback(hooks=...)`` (see the ``hooks`` argument,
    below), or subclassed for more specialised behaviour.

    Parameters
    ----------
    size: int (optional)
        Nominal quantity for the value that corresponds to a complete
        transfer, e.g., total number of tiles or total number of
        bytes
    value: int (0)
        Starting internal counter value
    hooks: dict or None
        A dict of named functions to be called on each update. The signature
        of these must be ``f(size, value, **kwargs)``
    """

    def __init__(self, size=None, value=0, hooks=None, **kwargs):
        self.size = size
        self.value = value
        self.hooks = hooks or {}
        # extra kwargs are forwarded to every hook invocation in call()
        self.kw = kwargs

    def __enter__(self):
        return self

    def __exit__(self, *exc_args):
        self.close()

    def close(self):
        """Close callback."""

    def branched(self, path_1, path_2, **kwargs):
        """
        Return callback for child transfers

        If this callback is operating at a higher level, e.g., put, which may
        trigger transfers that can also be monitored. The function returns a callback
        that has to be passed to the child method, e.g., put_file,
        as `callback=` argument.

        The implementation uses `callback.branch` for compatibility.
        When implementing callbacks, it is recommended to override this function instead
        of `branch` and avoid calling `super().branched(...)`.

        Prefer using this function over `branch`.

        Parameters
        ----------
        path_1: str
            Child's source path
        path_2: str
            Child's destination path
        **kwargs:
            Arbitrary keyword arguments

        Returns
        -------
        callback: Callback
            A callback instance to be passed to the child method
        """
        self.branch(path_1, path_2, kwargs)
        # mutate kwargs so that we can force the caller to pass "callback=" explicitly
        return kwargs.pop("callback", DEFAULT_CALLBACK)

    def branch_coro(self, fn):
        """
        Wraps a coroutine, and pass a new child callback to it.
        """

        @wraps(fn)
        async def func(path1, path2: str, **kwargs):
            with self.branched(path1, path2, **kwargs) as child:
                return await fn(path1, path2, callback=child, **kwargs)

        return func

    def set_size(self, size):
        """
        Set the internal maximum size attribute

        Usually called if not initially set at instantiation. Note that this
        triggers a ``call()``.

        Parameters
        ----------
        size: int
        """
        self.size = size
        self.call()

    def absolute_update(self, value):
        """
        Set the internal value state

        Triggers ``call()``

        Parameters
        ----------
        value: int
        """
        self.value = value
        self.call()

    def relative_update(self, inc=1):
        """
        Delta increment the internal counter

        Triggers ``call()``

        Parameters
        ----------
        inc: int
        """
        self.value += inc
        self.call()

    def call(self, hook_name=None, **kwargs):
        """
        Execute hook(s) with current state

        Each function is passed the internal size and current value

        Parameters
        ----------
        hook_name: str or None
            If given, execute on this hook
        kwargs: passed on to (all) hook(s)
        """
        if not self.hooks:
            return
        # per-call kwargs override the ones given at construction time
        kw = self.kw.copy()
        kw.update(kwargs)
        if hook_name:
            if hook_name not in self.hooks:
                return
            return self.hooks[hook_name](self.size, self.value, **kw)
        for hook in self.hooks.values() or []:
            hook(self.size, self.value, **kw)

    def wrap(self, iterable):
        """
        Wrap an iterable to call ``relative_update`` on each iterations

        Parameters
        ----------
        iterable: Iterable
            The iterable that is being wrapped
        """
        for item in iterable:
            self.relative_update()
            yield item

    def branch(self, path_1, path_2, kwargs):
        """
        Set callbacks for child transfers

        If this callback is operating at a higher level, e.g., put, which may
        trigger transfers that can also be monitored. The passed kwargs are
        to be *mutated* to add ``callback=``, if this class supports branching
        to children.

        Parameters
        ----------
        path_1: str
            Child's source path
        path_2: str
            Child's destination path
        kwargs: dict
            arguments passed to child method, e.g., put_file.

        Returns
        -------

        """
        return None

    def no_op(self, *_, **__):
        # swallow any call; returned by __getattr__ for unknown attributes
        pass

    def __getattr__(self, item):
        """
        If undefined methods are called on this class, nothing happens
        """
        return self.no_op

    @classmethod
    def as_callback(cls, maybe_callback=None):
        """Transform callback=... into Callback instance

        For the special value of ``None``, return the global instance of
        ``NoOpCallback``. This is an alternative to including
        ``callback=DEFAULT_CALLBACK`` directly in a method signature.
        """
        if maybe_callback is None:
            return DEFAULT_CALLBACK
        return maybe_callback
205
+
206
+
207
class NoOpCallback(Callback):
    """
    This implementation of Callback does exactly nothing

    It is used as the module-wide default so that callers never need to
    check for ``callback is None``.
    """

    def call(self, *args, **kwargs):
        # Intentionally a no-op: swallow every progress event.
        return None
214
+
215
+
216
class DotPrinterCallback(Callback):
    """
    Simple example Callback implementation

    Prints one character per progress event; the outer transfer emits "#"
    (by default) while branched child transfers emit ".", demonstrating
    two-level progress output.
    """

    def __init__(self, chr_to_print="#", **kwargs):
        # Character emitted on every call()
        self.chr = chr_to_print
        super().__init__(**kwargs)

    def branch(self, path_1, path_2, kwargs):
        """Mutate kwargs to add new instance with different print char"""
        kwargs["callback"] = DotPrinterCallback(".")

    def call(self, **kwargs):
        """Just outputs a character"""
        print(self.chr, end="")
235
+
236
+
237
class TqdmCallback(Callback):
    """
    A callback to display a progress bar using tqdm

    Parameters
    ----------
    tqdm_kwargs : dict, (optional)
        Any argument accepted by the tqdm constructor.
        See the `tqdm doc <https://tqdm.github.io/docs/tqdm/#__init__>`_.
        Will be forwarded to `tqdm_cls`.
    tqdm_cls: (optional)
        subclass of `tqdm.tqdm`. If not passed, it will default to `tqdm.tqdm`.

    Examples
    --------
    >>> import fsspec
    >>> from fsspec.callbacks import TqdmCallback
    >>> fs = fsspec.filesystem("memory")
    >>> path2distant_data = "/your-path"
    >>> fs.upload(
            ".",
            path2distant_data,
            recursive=True,
            callback=TqdmCallback(),
        )

    You can forward args to tqdm using the ``tqdm_kwargs`` parameter.

    >>> fs.upload(
            ".",
            path2distant_data,
            recursive=True,
            callback=TqdmCallback(tqdm_kwargs={"desc": "Your tqdm description"}),
        )

    You can also customize the progress bar by passing a subclass of `tqdm`.

    .. code-block:: python

        class TqdmFormat(tqdm):
            '''Provides a `total_time` format parameter'''
            @property
            def format_dict(self):
                d = super().format_dict
                total_time = d["elapsed"] * (d["total"] or 0) / max(d["n"], 1)
                d.update(total_time=self.format_interval(total_time) + " in total")
                return d

    >>> with TqdmCallback(
            tqdm_kwargs={
                "desc": "desc",
                "bar_format": "{total_time}: {percentage:.0f}%|{bar}{r_bar}",
            },
            tqdm_cls=TqdmFormat,
        ) as callback:
            fs.upload(".", path2distant_data, recursive=True, callback=callback)
    """

    def __init__(self, tqdm_kwargs=None, *args, **kwargs):
        # tqdm is an optional dependency: import lazily and fail with a
        # clear message if it is missing.
        try:
            from tqdm import tqdm

        except ImportError as exce:
            raise ImportError(
                "Using TqdmCallback requires tqdm to be installed"
            ) from exce

        self._tqdm_cls = kwargs.pop("tqdm_cls", tqdm)
        self._tqdm_kwargs = tqdm_kwargs or {}
        # Progress bar is created lazily on the first call()
        self.tqdm = None
        super().__init__(*args, **kwargs)

    def call(self, *args, **kwargs):
        if self.tqdm is None:
            self.tqdm = self._tqdm_cls(total=self.size, **self._tqdm_kwargs)
        # keep total in sync in case set_size() ran after bar creation
        self.tqdm.total = self.size
        self.tqdm.update(self.value - self.tqdm.n)

    def close(self):
        if self.tqdm is not None:
            self.tqdm.close()
            self.tqdm = None

    def __del__(self):
        return self.close()
322
+
323
+
324
# Shared no-op singleton; _DEFAULT_CALLBACK is a backward-compatible alias.
DEFAULT_CALLBACK = _DEFAULT_CALLBACK = NoOpCallback()
mgm/lib/python3.10/site-packages/fsspec/compression.py ADDED
@@ -0,0 +1,175 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Helper functions for a standard streaming compression API"""
2
+
3
+ from zipfile import ZipFile
4
+
5
+ import fsspec.utils
6
+ from fsspec.spec import AbstractBufferedFile
7
+
8
+
9
def noop_file(file, mode, **kwargs):
    """Identity "compression": hand back the file object untouched."""
    return file
11
+
12
+
13
+ # TODO: files should also be available as contexts
14
+ # should be functions of the form func(infile, mode=, **kwargs) -> file-like
15
+ compr = {None: noop_file}
16
+
17
+
18
def register_compression(name, callback, extensions, force=False):
    """Register an "inferable" file compression type.

    Registers transparent file compression type for use with fsspec.open.
    Compression can be specified by name in open, or "infer"-ed for any files
    ending with the given extensions.

    Args:
        name: (str) The compression type name. Eg. "gzip".
        callback: A callable of form (infile, mode, **kwargs) -> file-like.
            Accepts an input file-like object, the target mode and kwargs.
            Returns a wrapped file-like object.
        extensions: (str, Iterable[str]) A file extension, or list of file
            extensions for which to infer this compression scheme. Eg. "gz".
        force: (bool) Force re-registration of compression type or extensions.

    Raises:
        ValueError: If name or extensions already registered, and not force.

    """
    exts = [extensions] if isinstance(extensions, str) else extensions

    # Refuse to silently overwrite an existing registration unless forced.
    if not force:
        if name in compr:
            raise ValueError(f"Duplicate compression registration: {name}")
        for ext in exts:
            if ext in fsspec.utils.compressions:
                raise ValueError(f"Duplicate compression file extension: {ext} ({name})")

    compr[name] = callback
    for ext in exts:
        fsspec.utils.compressions[ext] = name
53
+
54
+
55
def unzip(infile, mode="rb", filename=None, **kwargs):
    """Expose one member of a ZIP archive as a file-like object.

    Reading opens the named member (default: the first entry in the
    archive); writing creates a fresh archive containing a single member.
    """
    if "r" in mode:
        archive = ZipFile(infile)
        member = filename if filename is not None else archive.namelist()[0]
        return archive.open(member, mode="r", **kwargs)
    # Write mode: closing the member stream must also finalise the archive,
    # so the close method is patched to chain both.
    member = filename or "file"
    archive = ZipFile(infile, mode="w", **kwargs)
    stream = archive.open(member, mode="w")
    stream.close = lambda closer=stream.close: closer() or archive.close()
    return stream
66
+
67
+
68
+ register_compression("zip", unzip, "zip")
69
+
70
+ try:
71
+ from bz2 import BZ2File
72
+ except ImportError:
73
+ pass
74
+ else:
75
+ register_compression("bz2", BZ2File, "bz2")
76
+
77
+ try: # pragma: no cover
78
+ from isal import igzip
79
+
80
+ def isal(infile, mode="rb", **kwargs):
81
+ return igzip.IGzipFile(fileobj=infile, mode=mode, **kwargs)
82
+
83
+ register_compression("gzip", isal, "gz")
84
+ except ImportError:
85
+ from gzip import GzipFile
86
+
87
+ register_compression(
88
+ "gzip", lambda f, **kwargs: GzipFile(fileobj=f, **kwargs), "gz"
89
+ )
90
+
91
+ try:
92
+ from lzma import LZMAFile
93
+
94
+ register_compression("lzma", LZMAFile, "lzma")
95
+ register_compression("xz", LZMAFile, "xz")
96
+ except ImportError:
97
+ pass
98
+
99
+ try:
100
+ import lzmaffi
101
+
102
+ register_compression("lzma", lzmaffi.LZMAFile, "lzma", force=True)
103
+ register_compression("xz", lzmaffi.LZMAFile, "xz", force=True)
104
+ except ImportError:
105
+ pass
106
+
107
+
108
class SnappyFile(AbstractBufferedFile):
    """Buffered file wrapper applying snappy (de)compression on the fly.

    Wraps another file-like object (``infile``): writes are compressed in
    chunks via a stream compressor, reads are streamed through a stream
    decompressor. The resulting file is not seekable.
    """

    def __init__(self, infile, mode, **kwargs):
        import snappy

        # size is a dummy sentinel: the decompressed size is not knowable
        # up front, so a very large value is used instead.
        super().__init__(
            fs=None, path="snappy", mode=mode.strip("b") + "b", size=999999999, **kwargs
        )
        self.infile = infile
        if "r" in mode:
            self.codec = snappy.StreamDecompressor()
        else:
            self.codec = snappy.StreamCompressor()

    def _upload_chunk(self, final=False):
        # Compress whatever has accumulated in the write buffer and push it
        # to the underlying file.
        self.buffer.seek(0)
        out = self.codec.add_chunk(self.buffer.read())
        self.infile.write(out)
        return True

    def seek(self, loc, whence=0):
        raise NotImplementedError("SnappyFile is not seekable")

    def seekable(self):
        return False

    def _fetch_range(self, start, end):
        """Get the specified set of bytes from remote"""
        # NOTE(review): `start` is ignored and reads proceed sequentially —
        # presumably AbstractBufferedFile only requests consecutive ranges
        # on a non-seekable file; confirm before relying on random access.
        data = self.infile.read(end - start)
        return self.codec.decompress(data)
137
+
138
+
139
+ try:
140
+ import snappy
141
+
142
+ snappy.compress(b"")
143
+ # Snappy may use the .sz file extension, but this is not part of the
144
+ # standard implementation.
145
+ register_compression("snappy", SnappyFile, [])
146
+
147
+ except (ImportError, NameError, AttributeError):
148
+ pass
149
+
150
+ try:
151
+ import lz4.frame
152
+
153
+ register_compression("lz4", lz4.frame.open, "lz4")
154
+ except ImportError:
155
+ pass
156
+
157
+ try:
158
+ import zstandard as zstd
159
+
160
+ def zstandard_file(infile, mode="rb"):
161
+ if "r" in mode:
162
+ cctx = zstd.ZstdDecompressor()
163
+ return cctx.stream_reader(infile)
164
+ else:
165
+ cctx = zstd.ZstdCompressor(level=10)
166
+ return cctx.stream_writer(infile)
167
+
168
+ register_compression("zstd", zstandard_file, "zst")
169
+ except ImportError:
170
+ pass
171
+
172
+
173
def available_compressions():
    """Return a list of the implemented compressions."""
    return [*compr]
mgm/lib/python3.10/site-packages/fsspec/conftest.py ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import shutil
3
+ import subprocess
4
+ import sys
5
+ import time
6
+
7
+ import pytest
8
+
9
+ import fsspec
10
+ from fsspec.implementations.cached import CachingFileSystem
11
+
12
+
13
@pytest.fixture()
def m():
    """
    Fixture providing a memory filesystem.

    Yields a clean ``memory`` filesystem and restores it to an empty state
    afterwards.
    """
    m = fsspec.filesystem("memory")
    # The memory filesystem is a process-wide singleton, so state may
    # linger from earlier tests: start from a clean slate.
    m.store.clear()
    m.pseudo_dirs.clear()
    m.pseudo_dirs.append("")
    try:
        yield m
    finally:
        # Clean up after the test as well, for the same reason.
        m.store.clear()
        m.pseudo_dirs.clear()
        m.pseudo_dirs.append("")
28
+
29
+
30
@pytest.fixture
def ftp_writable(tmpdir):
    """
    Fixture providing a writable FTP filesystem.

    Starts a local pyftpdlib server rooted at ``tmpdir`` (seeded with one
    file named "out") and yields ``(host, port, username, password)``.
    """
    pytest.importorskip("pyftpdlib")
    from fsspec.implementations.ftp import FTPFileSystem

    FTPFileSystem.clear_instance_cache()  # remove lingering connections
    CachingFileSystem.clear_instance_cache()
    d = str(tmpdir)
    with open(os.path.join(d, "out"), "wb") as f:
        f.write(b"hello" * 10000)
    # Launch the FTP server as a subprocess; -w enables write access.
    # pyftpdlib listens on port 2121 by default.
    P = subprocess.Popen(
        [sys.executable, "-m", "pyftpdlib", "-d", d, "-u", "user", "-P", "pass", "-w"]
    )
    try:
        # Give the server a moment to start listening.
        time.sleep(1)
        yield "localhost", 2121, "user", "pass"
    finally:
        P.terminate()
        P.wait()
        try:
            shutil.rmtree(tmpdir)
        except Exception:  # best-effort cleanup only
            pass
mgm/lib/python3.10/site-packages/fsspec/dircache.py ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import time
2
+ from collections.abc import MutableMapping
3
+ from functools import lru_cache
4
+
5
+
6
class DirCache(MutableMapping):
    """
    Caching of directory listings, in a structure like::

        {"path0": [
            {"name": "path0/file0",
             "size": 123,
             "type": "file",
             ...
            },
            {"name": "path0/file1",
            },
            ...
            ],
         "path1": [...]
        }

    Parameters to this class control listing expiry or indeed turn
    caching off
    """

    def __init__(
        self,
        use_listings_cache=True,
        listings_expiry_time=None,
        max_paths=None,
        **kwargs,
    ):
        """

        Parameters
        ----------
        use_listings_cache: bool
            If False, this cache never returns items, but always reports KeyError,
            and setting items has no effect
        listings_expiry_time: int or float (optional)
            Time in seconds that a listing is considered valid. If None,
            listings do not expire.
        max_paths: int (optional)
            The number of most recent listings that are considered valid; 'recent'
            refers to when the entry was set.
        """
        self._cache = {}
        # Insertion timestamps; only populated when listings_expiry_time is set.
        self._times = {}
        if max_paths:
            # LRU bookkeeping: touching a key keeps it "recent" in the
            # lru_cache memo; when a key falls out of the memo, the next
            # touch re-executes the wrapped lambda and evicts its listing.
            self._q = lru_cache(max_paths + 1)(lambda key: self._cache.pop(key, None))
        self.use_listings_cache = use_listings_cache
        self.listings_expiry_time = listings_expiry_time
        self.max_paths = max_paths

    def __getitem__(self, item):
        if self.listings_expiry_time is not None:
            if self._times.get(item, 0) - time.time() < -self.listings_expiry_time:
                # Expired: drop the listing AND its timestamp (the timestamp
                # previously leaked here), then fall through to raise KeyError.
                del self._cache[item]
                self._times.pop(item, None)
        if self.max_paths:
            self._q(item)
        return self._cache[item]  # maybe raises KeyError

    def clear(self):
        self._cache.clear()
        # Also drop timestamps, which otherwise accumulate forever.
        self._times.clear()

    def __len__(self):
        return len(self._cache)

    def __contains__(self, item):
        try:
            self[item]
            return True
        except KeyError:
            return False

    def __setitem__(self, key, value):
        if not self.use_listings_cache:
            return
        if self.max_paths:
            self._q(key)
        self._cache[key] = value
        if self.listings_expiry_time is not None:
            self._times[key] = time.time()

    def __delitem__(self, key):
        del self._cache[key]
        # Keep the timestamp map in sync with the cache.
        self._times.pop(key, None)

    def __iter__(self):
        entries = list(self._cache)

        # Re-check membership so expired entries are skipped during iteration.
        return (k for k in entries if k in self)

    def __reduce__(self):
        # Pickle by reconstructing with the same configuration (cache
        # contents are deliberately not preserved).
        return (
            DirCache,
            (self.use_listings_cache, self.listings_expiry_time, self.max_paths),
        )
mgm/lib/python3.10/site-packages/fsspec/gui.py ADDED
@@ -0,0 +1,416 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import ast
2
+ import contextlib
3
+ import logging
4
+ import os
5
+ import re
6
+ from typing import ClassVar, Sequence
7
+
8
+ import panel as pn
9
+
10
+ from .core import OpenFile, get_filesystem_class, split_protocol
11
+ from .registry import known_implementations
12
+
13
+ pn.extension()
14
+ logger = logging.getLogger("fsspec.gui")
15
+
16
+
17
class SigSlot:
    """Signal-slot mixin, for Panel event passing

    Include this class in a widget manager's superclasses to be able to
    register events and callbacks on Panel widgets managed by that class.

    The method ``_register`` should be called as widgets are added, and external
    code should call ``connect`` to associate callbacks.

    By default, all signals emit a DEBUG logging statement.
    """

    # names of signals that this class may emit each of which must be
    # set by _register for any new instance
    signals: ClassVar[Sequence[str]] = []
    # names of actions that this class may respond to
    slots: ClassVar[Sequence[str]] = []

    # each of which must be a method name

    def __init__(self):
        # When True, widget events are dropped (see ignore_events()).
        self._ignoring_events = False
        # signal name -> {"widget", "callbacks", "thing", "log"}
        self._sigs = {}
        # "<widget-name>-<attribute>" -> signal name, for event dispatch
        self._map = {}
        self._setup()

    def _setup(self):
        """Create GUI elements and register signals"""
        self.panel = pn.pane.PaneBase()
        # no signals to set up in the base class

    def _register(
        self, widget, name, thing="value", log_level=logging.DEBUG, auto=False
    ):
        """Watch the given attribute of a widget and assign it a named event

        This is normally called at the time a widget is instantiated, in the
        class which owns it.

        Parameters
        ----------
        widget : pn.layout.Panel or None
            Widget to watch. If None, an anonymous signal not associated with
            any widget.
        name : str
            Name of this event
        thing : str
            Attribute of the given widget to watch
        log_level : int
            When the signal is triggered, a logging event of the given level
            will be fired in the dfviz logger.
        auto : bool
            If True, automatically connects with a method in this class of the
            same name.
        """
        if name not in self.signals:
            raise ValueError(f"Attempt to assign an undeclared signal: {name}")
        self._sigs[name] = {
            "widget": widget,
            "callbacks": [],
            "thing": thing,
            "log": log_level,
        }
        # Key used to route incoming widget events back to this signal name.
        wn = "-".join(
            [
                getattr(widget, "name", str(widget)) if widget is not None else "none",
                thing,
            ]
        )
        self._map[wn] = name
        if widget is not None:
            widget.param.watch(self._signal, thing, onlychanged=True)
        if auto and hasattr(self, name):
            self.connect(name, getattr(self, name))

    def _repr_mimebundle_(self, *args, **kwargs):
        """Display in a notebook or a server"""
        try:
            return self.panel._repr_mimebundle_(*args, **kwargs)
        except (ValueError, AttributeError) as exc:
            raise NotImplementedError(
                "Panel does not seem to be set up properly"
            ) from exc

    def connect(self, signal, slot):
        """Associate call back with given event

        The callback must be a function which takes the "new" value of the
        watched attribute as the only parameter. If the callback return False,
        this cancels any further processing of the given event.

        Alternatively, the callback can be a string, in which case it means
        emitting the correspondingly-named event (i.e., connect to self)
        """
        self._sigs[signal]["callbacks"].append(slot)

    def _signal(self, event):
        """This is called by a an action on a widget

        Within an self.ignore_events context, nothing happens.

        Tests can execute this method by directly changing the values of
        widget components.
        """
        if not self._ignoring_events:
            wn = "-".join([event.obj.name, event.name])
            if wn in self._map and self._map[wn] in self._sigs:
                self._emit(self._map[wn], event.new)

    @contextlib.contextmanager
    def ignore_events(self):
        """Temporarily turn off events processing in this instance

        (does not propagate to children)
        """
        self._ignoring_events = True
        try:
            yield
        finally:
            self._ignoring_events = False

    def _emit(self, sig, value=None):
        """An event happened, call its callbacks

        This method can be used in tests to simulate message passing without
        directly changing visual elements.

        Calling of callbacks will halt whenever one returns False.
        """
        logger.log(self._sigs[sig]["log"], f"{sig}: {value}")
        for callback in self._sigs[sig]["callbacks"]:
            if isinstance(callback, str):
                # String callback: re-emit under the given signal name.
                self._emit(callback)
            else:
                try:
                    # running callbacks should not break the interface
                    ret = callback(value)
                    if ret is False:
                        break
                except Exception as e:
                    logger.exception(
                        "Exception (%s) while executing callback for signal: %s",
                        e,
                        sig,
                    )

    def show(self, threads=False):
        """Open a new browser tab and display this instance's interface"""
        self.panel.show(threads=threads, verbose=False)
        return self
167
+
168
+
169
class SingleSelect(SigSlot):
    """A multiselect which only allows you to select one item for an event"""

    signals = ["_selected", "selected"]  # the first is internal
    slots = ["set_options", "set_selection", "add", "clear", "select"]

    def __init__(self, **kwargs):
        # kwargs are forwarded to the underlying pn.widgets.MultiSelect
        self.kwargs = kwargs
        super().__init__()

    def _setup(self):
        """Create the MultiSelect widget and wire the internal signal."""
        self.panel = pn.widgets.MultiSelect(**self.kwargs)
        self._register(self.panel, "_selected", "value")
        self._register(None, "selected")
        # Collapse any multi-selection down to the most recent single item.
        self.connect("_selected", self.select_one)

    def _signal(self, *args, **kwargs):
        super()._signal(*args, **kwargs)

    def select_one(self, *_):
        """Keep only the last-chosen item, then emit "selected"."""
        with self.ignore_events():
            val = [self.panel.value[-1]] if self.panel.value else []
            self.panel.value = val
        self._emit("selected", self.panel.value)

    def set_options(self, options):
        self.panel.options = options

    def clear(self):
        self.panel.options = []

    @property
    def value(self):
        # the currently-selected item(s), as held by the widget
        return self.panel.value

    def set_selection(self, selection):
        self.panel.value = [selection]
206
+
207
+
208
class FileSelector(SigSlot):
    """Panel-based graphical file selector widget

    Instances of this widget are interactive and can be displayed in jupyter by having
    them as the output of a cell, or in a separate browser tab using ``.show()``.
    """

    signals = [
        "protocol_changed",
        "selection_changed",
        "directory_entered",
        "home_clicked",
        "up_clicked",
        "go_clicked",
        "filters_changed",
    ]
    slots = ["set_filters", "go_home"]

    def __init__(self, url=None, filters=None, ignore=None, kwargs=None):
        """

        Parameters
        ----------
        url : str (optional)
            Initial value of the URL to populate the dialog; should include protocol
        filters : list(str) (optional)
            File endings to include in the listings. If not included, all files are
            allowed. Does not affect directories.
            If given, the endings will appear as checkboxes in the interface
        ignore : list(str) (optional)
            Regex(s) of file basename patterns to ignore, e.g., "\\." for typical
            hidden files on posix
        kwargs : dict (optional)
            To pass to file system instance
        """
        if url:
            self.init_protocol, url = split_protocol(url)
        else:
            self.init_protocol, url = "file", os.getcwd()
        self.init_url = url
        # The kwargs box holds a python-literal string; note str(None) ->
        # "None", which storage_options later normalises to {}.
        self.init_kwargs = (kwargs if isinstance(kwargs, str) else str(kwargs)) or "{}"
        self.filters = filters
        self.ignore = [re.compile(i) for i in ignore or []]
        self._fs = None
        super().__init__()

    def _setup(self):
        """Create all widgets, wire signals, and perform the initial listing."""
        self.url = pn.widgets.TextInput(
            name="url",
            value=self.init_url,
            align="end",
            sizing_mode="stretch_width",
            width_policy="max",
        )
        self.protocol = pn.widgets.Select(
            options=sorted(known_implementations),
            value=self.init_protocol,
            name="protocol",
            align="center",
        )
        self.kwargs = pn.widgets.TextInput(
            name="kwargs", value=self.init_kwargs, align="center"
        )
        self.go = pn.widgets.Button(name="⇨", align="end", width=45)
        self.main = SingleSelect(size=10)
        self.home = pn.widgets.Button(name="🏠", width=40, height=30, align="end")
        self.up = pn.widgets.Button(name="‹", width=30, height=30, align="end")

        self._register(self.protocol, "protocol_changed", auto=True)
        self._register(self.go, "go_clicked", "clicks", auto=True)
        self._register(self.up, "up_clicked", "clicks", auto=True)
        self._register(self.home, "home_clicked", "clicks", auto=True)
        self._register(None, "selection_changed")
        self.main.connect("selected", self.selection_changed)
        self._register(None, "directory_entered")
        # Remember protocol/kwargs so go_clicked can detect changes and
        # recreate the filesystem instance when needed.
        self.prev_protocol = self.protocol.value
        self.prev_kwargs = self.storage_options

        self.filter_sel = pn.widgets.CheckBoxGroup(
            value=[], options=[], inline=False, align="end", width_policy="min"
        )
        self._register(self.filter_sel, "filters_changed", auto=True)

        self.panel = pn.Column(
            pn.Row(self.protocol, self.kwargs),
            pn.Row(self.home, self.up, self.url, self.go, self.filter_sel),
            self.main.panel,
        )
        self.set_filters(self.filters)
        self.go_clicked()

    def set_filters(self, filters=None):
        """Set the file-ending filters shown as checkboxes."""
        self.filters = filters
        if filters:
            self.filter_sel.options = filters
            self.filter_sel.value = filters
        else:
            self.filter_sel.options = []
            self.filter_sel.value = []

    @property
    def storage_options(self):
        """Value of the kwargs box as a dictionary"""
        return ast.literal_eval(self.kwargs.value) or {}

    @property
    def fs(self):
        """Current filesystem instance"""
        if self._fs is None:
            cls = get_filesystem_class(self.protocol.value)
            self._fs = cls(**self.storage_options)
        return self._fs

    @property
    def urlpath(self):
        """URL of currently selected item"""
        return (
            (f"{self.protocol.value}://{self.main.value[0]}")
            if self.main.value
            else None
        )

    def open_file(self, mode="rb", compression=None, encoding=None):
        """Create OpenFile instance for the currently selected item

        For example, in a notebook you might do something like

        .. code-block::

            [ ]: sel = FileSelector(); sel

            # user selects their file

            [ ]: with sel.open_file('rb') as f:
            ...      out = f.read()

        Parameters
        ----------
        mode: str (optional)
            Open mode for the file.
        compression: str (optional)
            The interact with the file as compressed. Set to 'infer' to guess
            compression from the file ending
        encoding: str (optional)
            If using text mode, use this encoding; defaults to UTF8.
        """
        if self.urlpath is None:
            raise ValueError("No file selected")
        return OpenFile(self.fs, self.urlpath, mode, compression, encoding)

    def filters_changed(self, values):
        # Re-list with the newly checked filter set.
        self.filters = values
        self.go_clicked()

    def selection_changed(self, *_):
        # Selecting a directory descends into it; selecting a file is a no-op.
        if self.urlpath is None:
            return
        if self.fs.isdir(self.urlpath):
            self.url.value = self.fs._strip_protocol(self.urlpath)
            self.go_clicked()

    def go_clicked(self, *_):
        """(Re)list the current URL and populate the selector widget."""
        if (
            self.prev_protocol != self.protocol.value
            or self.prev_kwargs != self.storage_options
        ):
            self._fs = None  # causes fs to be recreated
            self.prev_protocol = self.protocol.value
            self.prev_kwargs = self.storage_options
        listing = sorted(
            self.fs.ls(self.url.value, detail=True), key=lambda x: x["name"]
        )
        # Drop entries whose basename matches any ignore pattern.
        listing = [
            l
            for l in listing
            if not any(i.match(l["name"].rsplit("/", 1)[-1]) for i in self.ignore)
        ]
        folders = {
            "📁 " + o["name"].rsplit("/", 1)[-1]: o["name"]
            for o in listing
            if o["type"] == "directory"
        }
        files = {
            "📄 " + o["name"].rsplit("/", 1)[-1]: o["name"]
            for o in listing
            if o["type"] == "file"
        }
        if self.filters:
            files = {
                k: v
                for k, v in files.items()
                if any(v.endswith(ext) for ext in self.filters)
            }
        # Folders first, then files.
        self.main.set_options(dict(**folders, **files))

    def protocol_changed(self, *_):
        self._fs = None
        self.main.options = []
        self.url.value = ""

    def home_clicked(self, *_):
        # Reset protocol, kwargs and URL to the initial values.
        self.protocol.value = self.init_protocol
        self.kwargs.value = self.init_kwargs
        self.url.value = self.init_url
        self.go_clicked()

    def up_clicked(self, *_):
        self.url.value = self.fs._parent(self.url.value)
        self.go_clicked()
mgm/lib/python3.10/site-packages/fsspec/implementations/__init__.py ADDED
File without changes
mgm/lib/python3.10/site-packages/fsspec/implementations/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (171 Bytes). View file
 
mgm/lib/python3.10/site-packages/fsspec/implementations/__pycache__/cached.cpython-310.pyc ADDED
Binary file (28.8 kB). View file
 
mgm/lib/python3.10/site-packages/fsspec/implementations/__pycache__/dask.cpython-310.pyc ADDED
Binary file (4.74 kB). View file
 
mgm/lib/python3.10/site-packages/fsspec/implementations/__pycache__/dbfs.cpython-310.pyc ADDED
Binary file (13.7 kB). View file
 
mgm/lib/python3.10/site-packages/fsspec/implementations/__pycache__/dirfs.cpython-310.pyc ADDED
Binary file (14 kB). View file
 
mgm/lib/python3.10/site-packages/fsspec/implementations/__pycache__/ftp.cpython-310.pyc ADDED
Binary file (11.4 kB). View file
 
mgm/lib/python3.10/site-packages/fsspec/implementations/__pycache__/git.cpython-310.pyc ADDED
Binary file (4.18 kB). View file
 
mgm/lib/python3.10/site-packages/fsspec/implementations/__pycache__/http.cpython-310.pyc ADDED
Binary file (24 kB). View file
 
mgm/lib/python3.10/site-packages/fsspec/implementations/__pycache__/jupyter.cpython-310.pyc ADDED
Binary file (4.19 kB). View file
 
mgm/lib/python3.10/site-packages/fsspec/implementations/__pycache__/local.cpython-310.pyc ADDED
Binary file (14.5 kB). View file
 
mgm/lib/python3.10/site-packages/fsspec/implementations/__pycache__/reference.cpython-310.pyc ADDED
Binary file (38.5 kB). View file
 
mgm/lib/python3.10/site-packages/fsspec/implementations/__pycache__/sftp.cpython-310.pyc ADDED
Binary file (6.32 kB). View file
 
mgm/lib/python3.10/site-packages/fsspec/implementations/__pycache__/webhdfs.cpython-310.pyc ADDED
Binary file (16 kB). View file
 
mgm/lib/python3.10/site-packages/fsspec/implementations/__pycache__/zip.cpython-310.pyc ADDED
Binary file (5.79 kB). View file
 
mgm/lib/python3.10/site-packages/fsspec/implementations/arrow.py ADDED
@@ -0,0 +1,304 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import errno
2
+ import io
3
+ import os
4
+ import secrets
5
+ import shutil
6
+ from contextlib import suppress
7
+ from functools import cached_property, wraps
8
+ from urllib.parse import parse_qs
9
+
10
+ from fsspec.spec import AbstractFileSystem
11
+ from fsspec.utils import (
12
+ get_package_version_without_import,
13
+ infer_storage_options,
14
+ mirror_from,
15
+ tokenize,
16
+ )
17
+
18
+
19
def wrap_exceptions(func):
    """Decorator translating "does not exist" OSErrors into FileNotFoundError.

    Any other OSError (or an OSError without args) is re-raised unchanged.
    """

    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except OSError as exception:
            if not exception.args:
                raise
            message = exception.args[0]
            if isinstance(message, str) and "does not exist" in message:
                raise FileNotFoundError(errno.ENOENT, message) from exception
            raise

    return wrapper
35
+
36
+
37
+ PYARROW_VERSION = None
38
+
39
+
40
class ArrowFSWrapper(AbstractFileSystem):
    """FSSpec-compatible wrapper of pyarrow.fs.FileSystem.

    Parameters
    ----------
    fs : pyarrow.fs.FileSystem

    """

    root_marker = "/"

    def __init__(self, fs, **kwargs):
        global PYARROW_VERSION
        PYARROW_VERSION = get_package_version_without_import("pyarrow")
        self.fs = fs
        super().__init__(**kwargs)

    @property
    def protocol(self):
        return self.fs.type_name

    @cached_property
    def fsid(self):
        return "hdfs_" + tokenize(self.fs.host, self.fs.port)

    @classmethod
    def _strip_protocol(cls, path):
        ops = infer_storage_options(path)
        path = ops["path"]
        if path.startswith("//"):
            # special case for "hdfs://path" (without the triple slash)
            path = path[1:]
        return path

    def ls(self, path, detail=False, **kwargs):
        """List a directory; with detail=True return full info dicts."""
        path = self._strip_protocol(path)
        from pyarrow.fs import FileSelector

        entries = [
            self._make_entry(entry)
            for entry in self.fs.get_file_info(FileSelector(path))
        ]
        if detail:
            return entries
        else:
            return [entry["name"] for entry in entries]

    def info(self, path, **kwargs):
        path = self._strip_protocol(path)
        [info] = self.fs.get_file_info([path])
        return self._make_entry(info)

    def exists(self, path):
        path = self._strip_protocol(path)
        try:
            self.info(path)
        except FileNotFoundError:
            return False
        else:
            return True

    def _make_entry(self, info):
        """Convert a pyarrow FileInfo into an fsspec info dict.

        Raises FileNotFoundError for FileType.NotFound entries.
        """
        from pyarrow.fs import FileType

        if info.type is FileType.Directory:
            kind = "directory"
        elif info.type is FileType.File:
            kind = "file"
        elif info.type is FileType.NotFound:
            raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), info.path)
        else:
            kind = "other"

        return {
            "name": info.path,
            "size": info.size,
            "type": kind,
            "mtime": info.mtime,
        }

    @wrap_exceptions
    def cp_file(self, path1, path2, **kwargs):
        """Copy path1 to path2 via a temporary file, so an interrupted copy
        never leaves a partial file at the destination."""
        path1 = self._strip_protocol(path1).rstrip("/")
        path2 = self._strip_protocol(path2).rstrip("/")

        with self._open(path1, "rb") as lstream:
            tmp_fname = f"{path2}.tmp.{secrets.token_hex(6)}"
            try:
                with self.open(tmp_fname, "wb") as rstream:
                    shutil.copyfileobj(lstream, rstream)
                self.fs.move(tmp_fname, path2)
            except BaseException:
                # best-effort cleanup of the partial temporary file
                with suppress(FileNotFoundError):
                    self.fs.delete_file(tmp_fname)
                raise

    @wrap_exceptions
    def mv(self, path1, path2, **kwargs):
        path1 = self._strip_protocol(path1).rstrip("/")
        path2 = self._strip_protocol(path2).rstrip("/")
        self.fs.move(path1, path2)

    @wrap_exceptions
    def rm_file(self, path):
        path = self._strip_protocol(path)
        self.fs.delete_file(path)

    @wrap_exceptions
    def rm(self, path, recursive=False, maxdepth=None):
        path = self._strip_protocol(path).rstrip("/")
        if self.isdir(path):
            if recursive:
                self.fs.delete_dir(path)
            else:
                # Fixed message: previously said "without recursive=False",
                # which contradicted the actual requirement.
                raise ValueError("Can't delete directories without recursive=True")
        else:
            self.fs.delete_file(path)

    @wrap_exceptions
    def _open(self, path, mode="rb", block_size=None, seekable=True, **kwargs):
        """Open a pyarrow stream for the given mode, wrapped as an ArrowFile."""
        if mode == "rb":
            if seekable:
                method = self.fs.open_input_file
            else:
                method = self.fs.open_input_stream
        elif mode == "wb":
            method = self.fs.open_output_stream
        elif mode == "ab":
            method = self.fs.open_append_stream
        else:
            raise ValueError(f"unsupported mode for Arrow filesystem: {mode!r}")

        _kwargs = {}
        if mode != "rb" or not seekable:
            if int(PYARROW_VERSION.split(".")[0]) >= 4:
                # disable compression auto-detection
                _kwargs["compression"] = None
        stream = method(path, **_kwargs)

        return ArrowFile(self, stream, path, mode, block_size, **kwargs)

    @wrap_exceptions
    def mkdir(self, path, create_parents=True, **kwargs):
        path = self._strip_protocol(path)
        if create_parents:
            self.makedirs(path, exist_ok=True)
        else:
            self.fs.create_dir(path, recursive=False)

    @wrap_exceptions
    def makedirs(self, path, exist_ok=False):
        path = self._strip_protocol(path)
        self.fs.create_dir(path, recursive=True)

    @wrap_exceptions
    def rmdir(self, path):
        path = self._strip_protocol(path)
        self.fs.delete_dir(path)

    @wrap_exceptions
    def modified(self, path):
        path = self._strip_protocol(path)
        return self.fs.get_file_info(path).mtime

    def cat_file(self, path, start=None, end=None, **kwargs):
        # A read from the start of the file can use the cheaper
        # non-seekable stream; any other offset needs a seekable file.
        kwargs["seekable"] = start not in [None, 0]
        # Fix: forward the requested byte range; previously hard-coded
        # start=None, end=None, silently returning the whole file for
        # ranged requests.
        return super().cat_file(path, start=start, end=end, **kwargs)

    def get_file(self, rpath, lpath, **kwargs):
        # Whole-file download is strictly sequential, so avoid the
        # seekable-file overhead.
        kwargs["seekable"] = False
        super().get_file(rpath, lpath, **kwargs)
+
212
+
213
@mirror_from(
    "stream",
    [
        "read",
        "seek",
        "tell",
        "write",
        "readable",
        "writable",
        "close",
        "size",
        "seekable",
    ],
)
class ArrowFile(io.IOBase):
    """File-like wrapper over a pyarrow native stream.

    The I/O methods listed in the decorator are forwarded to ``self.stream``
    by ``mirror_from``; this class itself only records metadata and adds
    context-manager support.
    """

    def __init__(self, fs, stream, path, mode, block_size=None, **kwargs):
        self.path = path
        self.mode = mode

        self.fs = fs
        self.stream = stream

        self.blocksize = self.block_size = block_size
        self.kwargs = kwargs

    def __enter__(self):
        return self

    def __exit__(self, *args):
        # close() is mirrored onto the underlying stream
        return self.close()
243
+
244
+
245
class HadoopFileSystem(ArrowFSWrapper):
    """A wrapper on top of the pyarrow.fs.HadoopFileSystem
    to connect it's interface with fsspec"""

    protocol = "hdfs"

    def __init__(
        self,
        host="default",
        port=0,
        user=None,
        kerb_ticket=None,
        replication=3,
        extra_conf=None,
        **kwargs,
    ):
        """

        Parameters
        ----------
        host: str
            Hostname, IP or "default" to try to read from Hadoop config
        port: int
            Port to connect on, or default from Hadoop config if 0
        user: str or None
            If given, connect as this username
        kerb_ticket: str or None
            If given, use this ticket for authentication
        replication: int
            set replication factor of file for write operations. default value is 3.
        extra_conf: None or dict
            Passed on to HadoopFileSystem
        """
        from pyarrow.fs import HadoopFileSystem

        fs = HadoopFileSystem(
            host=host,
            port=port,
            user=user,
            kerb_ticket=kerb_ticket,
            replication=replication,
            extra_conf=extra_conf,
        )
        super().__init__(fs=fs, **kwargs)

    @staticmethod
    def _get_kwargs_from_urls(path):
        """Extract constructor kwargs (host/user/port/replication) from a URL."""
        ops = infer_storage_options(path)
        out = {}
        if ops.get("host", None):
            out["host"] = ops["host"]
        if ops.get("username", None):
            out["user"] = ops["username"]
        if ops.get("port", None):
            out["port"] = ops["port"]
        if ops.get("url_query", None):
            # e.g. hdfs://host:port/path?replication=2
            queries = parse_qs(ops["url_query"])
            if queries.get("replication", None):
                out["replication"] = int(queries["replication"][0])
        return out
mgm/lib/python3.10/site-packages/fsspec/implementations/asyn_wrapper.py ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import asyncio
2
+ import functools
3
+ import inspect
4
+
5
+ from fsspec.asyn import AsyncFileSystem
6
+
7
+
8
+ def async_wrapper(func, obj=None):
9
+ """
10
+ Wraps a synchronous function to make it awaitable.
11
+
12
+ Parameters
13
+ ----------
14
+ func : callable
15
+ The synchronous function to wrap.
16
+ obj : object, optional
17
+ The instance to bind the function to, if applicable.
18
+
19
+ Returns
20
+ -------
21
+ coroutine
22
+ An awaitable version of the function.
23
+ """
24
+
25
+ @functools.wraps(func)
26
+ async def wrapper(*args, **kwargs):
27
+ return await asyncio.to_thread(func, *args, **kwargs)
28
+
29
+ return wrapper
30
+
31
+
32
+ class AsyncFileSystemWrapper(AsyncFileSystem):
33
+ """
34
+ A wrapper class to convert a synchronous filesystem into an asynchronous one.
35
+
36
+ This class takes an existing synchronous filesystem implementation and wraps all
37
+ its methods to provide an asynchronous interface.
38
+
39
+ Parameters
40
+ ----------
41
+ sync_fs : AbstractFileSystem
42
+ The synchronous filesystem instance to wrap.
43
+ """
44
+
45
+ def __init__(self, sync_fs, *args, **kwargs):
46
+ super().__init__(*args, **kwargs)
47
+ self.asynchronous = True
48
+ self.sync_fs = sync_fs
49
+ self.protocol = self.sync_fs.protocol
50
+ self._wrap_all_sync_methods()
51
+
52
+ @property
53
+ def fsid(self):
54
+ return f"async_{self.sync_fs.fsid}"
55
+
56
+ def _wrap_all_sync_methods(self):
57
+ """
58
+ Wrap all synchronous methods of the underlying filesystem with asynchronous versions.
59
+ """
60
+ for method_name in dir(self.sync_fs):
61
+ if method_name.startswith("_"):
62
+ continue
63
+
64
+ attr = inspect.getattr_static(self.sync_fs, method_name)
65
+ if isinstance(attr, property):
66
+ continue
67
+
68
+ method = getattr(self.sync_fs, method_name)
69
+ if callable(method) and not asyncio.iscoroutinefunction(method):
70
+ async_method = async_wrapper(method, obj=self)
71
+ setattr(self, f"_{method_name}", async_method)
72
+
73
+ @classmethod
74
+ def wrap_class(cls, sync_fs_class):
75
+ """
76
+ Create a new class that can be used to instantiate an AsyncFileSystemWrapper
77
+ with lazy instantiation of the underlying synchronous filesystem.
78
+
79
+ Parameters
80
+ ----------
81
+ sync_fs_class : type
82
+ The class of the synchronous filesystem to wrap.
83
+
84
+ Returns
85
+ -------
86
+ type
87
+ A new class that wraps the provided synchronous filesystem class.
88
+ """
89
+
90
+ class GeneratedAsyncFileSystemWrapper(cls):
91
+ def __init__(self, *args, **kwargs):
92
+ sync_fs = sync_fs_class(*args, **kwargs)
93
+ super().__init__(sync_fs)
94
+
95
+ GeneratedAsyncFileSystemWrapper.__name__ = (
96
+ f"Async{sync_fs_class.__name__}Wrapper"
97
+ )
98
+ return GeneratedAsyncFileSystemWrapper
mgm/lib/python3.10/site-packages/fsspec/implementations/cache_mapper.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import abc
4
+ import hashlib
5
+
6
+ from fsspec.implementations.local import make_path_posix
7
+
8
+
9
+ class AbstractCacheMapper(abc.ABC):
10
+ """Abstract super-class for mappers from remote URLs to local cached
11
+ basenames.
12
+ """
13
+
14
+ @abc.abstractmethod
15
+ def __call__(self, path: str) -> str: ...
16
+
17
+ def __eq__(self, other: object) -> bool:
18
+ # Identity only depends on class. When derived classes have attributes
19
+ # they will need to be included.
20
+ return isinstance(other, type(self))
21
+
22
+ def __hash__(self) -> int:
23
+ # Identity only depends on class. When derived classes have attributes
24
+ # they will need to be included.
25
+ return hash(type(self))
26
+
27
+
28
+ class BasenameCacheMapper(AbstractCacheMapper):
29
+ """Cache mapper that uses the basename of the remote URL and a fixed number
30
+ of directory levels above this.
31
+
32
+ The default is zero directory levels, meaning different paths with the same
33
+ basename will have the same cached basename.
34
+ """
35
+
36
+ def __init__(self, directory_levels: int = 0):
37
+ if directory_levels < 0:
38
+ raise ValueError(
39
+ "BasenameCacheMapper requires zero or positive directory_levels"
40
+ )
41
+ self.directory_levels = directory_levels
42
+
43
+ # Separator for directories when encoded as strings.
44
+ self._separator = "_@_"
45
+
46
+ def __call__(self, path: str) -> str:
47
+ path = make_path_posix(path)
48
+ prefix, *bits = path.rsplit("/", self.directory_levels + 1)
49
+ if bits:
50
+ return self._separator.join(bits)
51
+ else:
52
+ return prefix # No separator found, simple filename
53
+
54
+ def __eq__(self, other: object) -> bool:
55
+ return super().__eq__(other) and self.directory_levels == other.directory_levels
56
+
57
+ def __hash__(self) -> int:
58
+ return super().__hash__() ^ hash(self.directory_levels)
59
+
60
+
61
+ class HashCacheMapper(AbstractCacheMapper):
62
+ """Cache mapper that uses a hash of the remote URL."""
63
+
64
+ def __call__(self, path: str) -> str:
65
+ return hashlib.sha256(path.encode()).hexdigest()
66
+
67
+
68
+ def create_cache_mapper(same_names: bool) -> AbstractCacheMapper:
69
+ """Factory method to create cache mapper for backward compatibility with
70
+ ``CachingFileSystem`` constructor using ``same_names`` kwarg.
71
+ """
72
+ if same_names:
73
+ return BasenameCacheMapper()
74
+ else:
75
+ return HashCacheMapper()
mgm/lib/python3.10/site-packages/fsspec/implementations/cache_metadata.py ADDED
@@ -0,0 +1,232 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import os
4
+ import pickle
5
+ import time
6
+ from typing import TYPE_CHECKING
7
+
8
+ from fsspec.utils import atomic_write
9
+
10
+ try:
11
+ import ujson as json
12
+ except ImportError:
13
+ if not TYPE_CHECKING:
14
+ import json
15
+
16
+ if TYPE_CHECKING:
17
+ from typing import Any, Dict, Iterator, Literal
18
+
19
+ from typing_extensions import TypeAlias
20
+
21
+ from .cached import CachingFileSystem
22
+
23
+ Detail: TypeAlias = Dict[str, Any]
24
+
25
+
26
+ class CacheMetadata:
27
+ """Cache metadata.
28
+
29
+ All reading and writing of cache metadata is performed by this class,
30
+ accessing the cached files and blocks is not.
31
+
32
+ Metadata is stored in a single file per storage directory in JSON format.
33
+ For backward compatibility, also reads metadata stored in pickle format
34
+ which is converted to JSON when next saved.
35
+ """
36
+
37
+ def __init__(self, storage: list[str]):
38
+ """
39
+
40
+ Parameters
41
+ ----------
42
+ storage: list[str]
43
+ Directories containing cached files, must be at least one. Metadata
44
+ is stored in the last of these directories by convention.
45
+ """
46
+ if not storage:
47
+ raise ValueError("CacheMetadata expects at least one storage location")
48
+
49
+ self._storage = storage
50
+ self.cached_files: list[Detail] = [{}]
51
+
52
+ # Private attribute to force saving of metadata in pickle format rather than
53
+ # JSON for use in tests to confirm can read both pickle and JSON formats.
54
+ self._force_save_pickle = False
55
+
56
+ def _load(self, fn: str) -> Detail:
57
+ """Low-level function to load metadata from specific file"""
58
+ try:
59
+ with open(fn, "r") as f:
60
+ loaded = json.load(f)
61
+ except ValueError:
62
+ with open(fn, "rb") as f:
63
+ loaded = pickle.load(f)
64
+ for c in loaded.values():
65
+ if isinstance(c.get("blocks"), list):
66
+ c["blocks"] = set(c["blocks"])
67
+ return loaded
68
+
69
+ def _save(self, metadata_to_save: Detail, fn: str) -> None:
70
+ """Low-level function to save metadata to specific file"""
71
+ if self._force_save_pickle:
72
+ with atomic_write(fn) as f:
73
+ pickle.dump(metadata_to_save, f)
74
+ else:
75
+ with atomic_write(fn, mode="w") as f:
76
+ json.dump(metadata_to_save, f)
77
+
78
+ def _scan_locations(
79
+ self, writable_only: bool = False
80
+ ) -> Iterator[tuple[str, str, bool]]:
81
+ """Yield locations (filenames) where metadata is stored, and whether
82
+ writable or not.
83
+
84
+ Parameters
85
+ ----------
86
+ writable: bool
87
+ Set to True to only yield writable locations.
88
+
89
+ Returns
90
+ -------
91
+ Yields (str, str, bool)
92
+ """
93
+ n = len(self._storage)
94
+ for i, storage in enumerate(self._storage):
95
+ writable = i == n - 1
96
+ if writable_only and not writable:
97
+ continue
98
+ yield os.path.join(storage, "cache"), storage, writable
99
+
100
+ def check_file(
101
+ self, path: str, cfs: CachingFileSystem | None
102
+ ) -> Literal[False] | tuple[Detail, str]:
103
+ """If path is in cache return its details, otherwise return ``False``.
104
+
105
+ If the optional CachingFileSystem is specified then it is used to
106
+ perform extra checks to reject possible matches, such as if they are
107
+ too old.
108
+ """
109
+ for (fn, base, _), cache in zip(self._scan_locations(), self.cached_files):
110
+ if path not in cache:
111
+ continue
112
+ detail = cache[path].copy()
113
+
114
+ if cfs is not None:
115
+ if cfs.check_files and detail["uid"] != cfs.fs.ukey(path):
116
+ # Wrong file as determined by hash of file properties
117
+ continue
118
+ if cfs.expiry and time.time() - detail["time"] > cfs.expiry:
119
+ # Cached file has expired
120
+ continue
121
+
122
+ fn = os.path.join(base, detail["fn"])
123
+ if os.path.exists(fn):
124
+ return detail, fn
125
+ return False
126
+
127
+ def clear_expired(self, expiry_time: int) -> tuple[list[str], bool]:
128
+ """Remove expired metadata from the cache.
129
+
130
+ Returns names of files corresponding to expired metadata and a boolean
131
+ flag indicating whether the writable cache is empty. Caller is
132
+ responsible for deleting the expired files.
133
+ """
134
+ expired_files = []
135
+ for path, detail in self.cached_files[-1].copy().items():
136
+ if time.time() - detail["time"] > expiry_time:
137
+ fn = detail.get("fn", "")
138
+ if not fn:
139
+ raise RuntimeError(
140
+ f"Cache metadata does not contain 'fn' for {path}"
141
+ )
142
+ fn = os.path.join(self._storage[-1], fn)
143
+ expired_files.append(fn)
144
+ self.cached_files[-1].pop(path)
145
+
146
+ if self.cached_files[-1]:
147
+ cache_path = os.path.join(self._storage[-1], "cache")
148
+ self._save(self.cached_files[-1], cache_path)
149
+
150
+ writable_cache_empty = not self.cached_files[-1]
151
+ return expired_files, writable_cache_empty
152
+
153
+ def load(self) -> None:
154
+ """Load all metadata from disk and store in ``self.cached_files``"""
155
+ cached_files = []
156
+ for fn, _, _ in self._scan_locations():
157
+ if os.path.exists(fn):
158
+ # TODO: consolidate blocks here
159
+ cached_files.append(self._load(fn))
160
+ else:
161
+ cached_files.append({})
162
+ self.cached_files = cached_files or [{}]
163
+
164
+ def on_close_cached_file(self, f: Any, path: str) -> None:
165
+ """Perform side-effect actions on closing a cached file.
166
+
167
+ The actual closing of the file is the responsibility of the caller.
168
+ """
169
+ # File must be writeble, so in self.cached_files[-1]
170
+ c = self.cached_files[-1][path]
171
+ if c["blocks"] is not True and len(c["blocks"]) * f.blocksize >= f.size:
172
+ c["blocks"] = True
173
+
174
+ def pop_file(self, path: str) -> str | None:
175
+ """Remove metadata of cached file.
176
+
177
+ If path is in the cache, return the filename of the cached file,
178
+ otherwise return ``None``. Caller is responsible for deleting the
179
+ cached file.
180
+ """
181
+ details = self.check_file(path, None)
182
+ if not details:
183
+ return None
184
+ _, fn = details
185
+ if fn.startswith(self._storage[-1]):
186
+ self.cached_files[-1].pop(path)
187
+ self.save()
188
+ else:
189
+ raise PermissionError(
190
+ "Can only delete cached file in last, writable cache location"
191
+ )
192
+ return fn
193
+
194
+ def save(self) -> None:
195
+ """Save metadata to disk"""
196
+ for (fn, _, writable), cache in zip(self._scan_locations(), self.cached_files):
197
+ if not writable:
198
+ continue
199
+
200
+ if os.path.exists(fn):
201
+ cached_files = self._load(fn)
202
+ for k, c in cached_files.items():
203
+ if k in cache:
204
+ if c["blocks"] is True or cache[k]["blocks"] is True:
205
+ c["blocks"] = True
206
+ else:
207
+ # self.cached_files[*][*]["blocks"] must continue to
208
+ # point to the same set object so that updates
209
+ # performed by MMapCache are propagated back to
210
+ # self.cached_files.
211
+ blocks = cache[k]["blocks"]
212
+ blocks.update(c["blocks"])
213
+ c["blocks"] = blocks
214
+ c["time"] = max(c["time"], cache[k]["time"])
215
+ c["uid"] = cache[k]["uid"]
216
+
217
+ # Files can be added to cache after it was written once
218
+ for k, c in cache.items():
219
+ if k not in cached_files:
220
+ cached_files[k] = c
221
+ else:
222
+ cached_files = cache
223
+ cache = {k: v.copy() for k, v in cached_files.items()}
224
+ for c in cache.values():
225
+ if isinstance(c["blocks"], set):
226
+ c["blocks"] = list(c["blocks"])
227
+ self._save(cache, fn)
228
+ self.cached_files[-1] = cached_files
229
+
230
+ def update_file(self, path: str, detail: Detail) -> None:
231
+ """Update metadata for specific file in memory, do not save"""
232
+ self.cached_files[-1][path] = detail
mgm/lib/python3.10/site-packages/fsspec/implementations/cached.py ADDED
@@ -0,0 +1,929 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import inspect
4
+ import logging
5
+ import os
6
+ import tempfile
7
+ import time
8
+ import weakref
9
+ from shutil import rmtree
10
+ from typing import TYPE_CHECKING, Any, Callable, ClassVar
11
+
12
+ from fsspec import AbstractFileSystem, filesystem
13
+ from fsspec.callbacks import DEFAULT_CALLBACK
14
+ from fsspec.compression import compr
15
+ from fsspec.core import BaseCache, MMapCache
16
+ from fsspec.exceptions import BlocksizeMismatchError
17
+ from fsspec.implementations.cache_mapper import create_cache_mapper
18
+ from fsspec.implementations.cache_metadata import CacheMetadata
19
+ from fsspec.spec import AbstractBufferedFile
20
+ from fsspec.transaction import Transaction
21
+ from fsspec.utils import infer_compression
22
+
23
+ if TYPE_CHECKING:
24
+ from fsspec.implementations.cache_mapper import AbstractCacheMapper
25
+
26
+ logger = logging.getLogger("fsspec.cached")
27
+
28
+
29
+ class WriteCachedTransaction(Transaction):
30
+ def complete(self, commit=True):
31
+ rpaths = [f.path for f in self.files]
32
+ lpaths = [f.fn for f in self.files]
33
+ if commit:
34
+ self.fs.put(lpaths, rpaths)
35
+ self.files.clear()
36
+ self.fs._intrans = False
37
+ self.fs._transaction = None
38
+ self.fs = None # break cycle
39
+
40
+
41
+ class CachingFileSystem(AbstractFileSystem):
42
+ """Locally caching filesystem, layer over any other FS
43
+
44
+ This class implements chunk-wise local storage of remote files, for quick
45
+ access after the initial download. The files are stored in a given
46
+ directory with hashes of URLs for the filenames. If no directory is given,
47
+ a temporary one is used, which should be cleaned up by the OS after the
48
+ process ends. The files themselves are sparse (as implemented in
49
+ :class:`~fsspec.caching.MMapCache`), so only the data which is accessed
50
+ takes up space.
51
+
52
+ Restrictions:
53
+
54
+ - the block-size must be the same for each access of a given file, unless
55
+ all blocks of the file have already been read
56
+ - caching can only be applied to file-systems which produce files
57
+ derived from fsspec.spec.AbstractBufferedFile ; LocalFileSystem is also
58
+ allowed, for testing
59
+ """
60
+
61
+ protocol: ClassVar[str | tuple[str, ...]] = ("blockcache", "cached")
62
+
63
+ def __init__(
64
+ self,
65
+ target_protocol=None,
66
+ cache_storage="TMP",
67
+ cache_check=10,
68
+ check_files=False,
69
+ expiry_time=604800,
70
+ target_options=None,
71
+ fs=None,
72
+ same_names: bool | None = None,
73
+ compression=None,
74
+ cache_mapper: AbstractCacheMapper | None = None,
75
+ **kwargs,
76
+ ):
77
+ """
78
+
79
+ Parameters
80
+ ----------
81
+ target_protocol: str (optional)
82
+ Target filesystem protocol. Provide either this or ``fs``.
83
+ cache_storage: str or list(str)
84
+ Location to store files. If "TMP", this is a temporary directory,
85
+ and will be cleaned up by the OS when this process ends (or later).
86
+ If a list, each location will be tried in the order given, but
87
+ only the last will be considered writable.
88
+ cache_check: int
89
+ Number of seconds between reload of cache metadata
90
+ check_files: bool
91
+ Whether to explicitly see if the UID of the remote file matches
92
+ the stored one before using. Warning: some file systems such as
93
+ HTTP cannot reliably give a unique hash of the contents of some
94
+ path, so be sure to set this option to False.
95
+ expiry_time: int
96
+ The time in seconds after which a local copy is considered useless.
97
+ Set to falsy to prevent expiry. The default is equivalent to one
98
+ week.
99
+ target_options: dict or None
100
+ Passed to the instantiation of the FS, if fs is None.
101
+ fs: filesystem instance
102
+ The target filesystem to run against. Provide this or ``protocol``.
103
+ same_names: bool (optional)
104
+ By default, target URLs are hashed using a ``HashCacheMapper`` so
105
+ that files from different backends with the same basename do not
106
+ conflict. If this argument is ``true``, a ``BasenameCacheMapper``
107
+ is used instead. Other cache mapper options are available by using
108
+ the ``cache_mapper`` keyword argument. Only one of this and
109
+ ``cache_mapper`` should be specified.
110
+ compression: str (optional)
111
+ To decompress on download. Can be 'infer' (guess from the URL name),
112
+ one of the entries in ``fsspec.compression.compr``, or None for no
113
+ decompression.
114
+ cache_mapper: AbstractCacheMapper (optional)
115
+ The object use to map from original filenames to cached filenames.
116
+ Only one of this and ``same_names`` should be specified.
117
+ """
118
+ super().__init__(**kwargs)
119
+ if fs is None and target_protocol is None:
120
+ raise ValueError(
121
+ "Please provide filesystem instance(fs) or target_protocol"
122
+ )
123
+ if not (fs is None) ^ (target_protocol is None):
124
+ raise ValueError(
125
+ "Both filesystems (fs) and target_protocol may not be both given."
126
+ )
127
+ if cache_storage == "TMP":
128
+ tempdir = tempfile.mkdtemp()
129
+ storage = [tempdir]
130
+ weakref.finalize(self, self._remove_tempdir, tempdir)
131
+ else:
132
+ if isinstance(cache_storage, str):
133
+ storage = [cache_storage]
134
+ else:
135
+ storage = cache_storage
136
+ os.makedirs(storage[-1], exist_ok=True)
137
+ self.storage = storage
138
+ self.kwargs = target_options or {}
139
+ self.cache_check = cache_check
140
+ self.check_files = check_files
141
+ self.expiry = expiry_time
142
+ self.compression = compression
143
+
144
+ # Size of cache in bytes. If None then the size is unknown and will be
145
+ # recalculated the next time cache_size() is called. On writes to the
146
+ # cache this is reset to None.
147
+ self._cache_size = None
148
+
149
+ if same_names is not None and cache_mapper is not None:
150
+ raise ValueError(
151
+ "Cannot specify both same_names and cache_mapper in "
152
+ "CachingFileSystem.__init__"
153
+ )
154
+ if cache_mapper is not None:
155
+ self._mapper = cache_mapper
156
+ else:
157
+ self._mapper = create_cache_mapper(
158
+ same_names if same_names is not None else False
159
+ )
160
+
161
+ self.target_protocol = (
162
+ target_protocol
163
+ if isinstance(target_protocol, str)
164
+ else (fs.protocol if isinstance(fs.protocol, str) else fs.protocol[0])
165
+ )
166
+ self._metadata = CacheMetadata(self.storage)
167
+ self.load_cache()
168
+ self.fs = fs if fs is not None else filesystem(target_protocol, **self.kwargs)
169
+
170
+ def _strip_protocol(path):
171
+ # acts as a method, since each instance has a difference target
172
+ return self.fs._strip_protocol(type(self)._strip_protocol(path))
173
+
174
+ self._strip_protocol: Callable = _strip_protocol
175
+
176
+ @staticmethod
177
+ def _remove_tempdir(tempdir):
178
+ try:
179
+ rmtree(tempdir)
180
+ except Exception:
181
+ pass
182
+
183
+ def _mkcache(self):
184
+ os.makedirs(self.storage[-1], exist_ok=True)
185
+
186
+ def cache_size(self):
187
+ """Return size of cache in bytes.
188
+
189
+ If more than one cache directory is in use, only the size of the last
190
+ one (the writable cache directory) is returned.
191
+ """
192
+ if self._cache_size is None:
193
+ cache_dir = self.storage[-1]
194
+ self._cache_size = filesystem("file").du(cache_dir, withdirs=True)
195
+ return self._cache_size
196
+
197
+ def load_cache(self):
198
+ """Read set of stored blocks from file"""
199
+ self._metadata.load()
200
+ self._mkcache()
201
+ self.last_cache = time.time()
202
+
203
+ def save_cache(self):
204
+ """Save set of stored blocks from file"""
205
+ self._mkcache()
206
+ self._metadata.save()
207
+ self.last_cache = time.time()
208
+ self._cache_size = None
209
+
210
+ def _check_cache(self):
211
+ """Reload caches if time elapsed or any disappeared"""
212
+ self._mkcache()
213
+ if not self.cache_check:
214
+ # explicitly told not to bother checking
215
+ return
216
+ timecond = time.time() - self.last_cache > self.cache_check
217
+ existcond = all(os.path.exists(storage) for storage in self.storage)
218
+ if timecond or not existcond:
219
+ self.load_cache()
220
+
221
+ def _check_file(self, path):
222
+ """Is path in cache and still valid"""
223
+ path = self._strip_protocol(path)
224
+ self._check_cache()
225
+ return self._metadata.check_file(path, self)
226
+
227
+ def clear_cache(self):
228
+ """Remove all files and metadata from the cache
229
+
230
+ In the case of multiple cache locations, this clears only the last one,
231
+ which is assumed to be the read/write one.
232
+ """
233
+ rmtree(self.storage[-1])
234
+ self.load_cache()
235
+ self._cache_size = None
236
+
237
+ def clear_expired_cache(self, expiry_time=None):
238
+ """Remove all expired files and metadata from the cache
239
+
240
+ In the case of multiple cache locations, this clears only the last one,
241
+ which is assumed to be the read/write one.
242
+
243
+ Parameters
244
+ ----------
245
+ expiry_time: int
246
+ The time in seconds after which a local copy is considered useless.
247
+ If not defined the default is equivalent to the attribute from the
248
+ file caching instantiation.
249
+ """
250
+
251
+ if not expiry_time:
252
+ expiry_time = self.expiry
253
+
254
+ self._check_cache()
255
+
256
+ expired_files, writable_cache_empty = self._metadata.clear_expired(expiry_time)
257
+ for fn in expired_files:
258
+ if os.path.exists(fn):
259
+ os.remove(fn)
260
+
261
+ if writable_cache_empty:
262
+ rmtree(self.storage[-1])
263
+ self.load_cache()
264
+
265
+ self._cache_size = None
266
+
267
+ def pop_from_cache(self, path):
268
+ """Remove cached version of given file
269
+
270
+ Deletes local copy of the given (remote) path. If it is found in a cache
271
+ location which is not the last, it is assumed to be read-only, and
272
+ raises PermissionError
273
+ """
274
+ path = self._strip_protocol(path)
275
+ fn = self._metadata.pop_file(path)
276
+ if fn is not None:
277
+ os.remove(fn)
278
+ self._cache_size = None
279
+
280
+ def _open(
281
+ self,
282
+ path,
283
+ mode="rb",
284
+ block_size=None,
285
+ autocommit=True,
286
+ cache_options=None,
287
+ **kwargs,
288
+ ):
289
+ """Wrap the target _open
290
+
291
+ If the whole file exists in the cache, just open it locally and
292
+ return that.
293
+
294
+ Otherwise, open the file on the target FS, and make it have a mmap
295
+ cache pointing to the location which we determine, in our cache.
296
+ The ``blocks`` instance is shared, so as the mmap cache instance
297
+ updates, so does the entry in our ``cached_files`` attribute.
298
+ We monkey-patch this file, so that when it closes, we call
299
+ ``close_and_update`` to save the state of the blocks.
300
+ """
301
+ path = self._strip_protocol(path)
302
+
303
+ path = self.fs._strip_protocol(path)
304
+ if "r" not in mode:
305
+ return self.fs._open(
306
+ path,
307
+ mode=mode,
308
+ block_size=block_size,
309
+ autocommit=autocommit,
310
+ cache_options=cache_options,
311
+ **kwargs,
312
+ )
313
+ detail = self._check_file(path)
314
+ if detail:
315
+ # file is in cache
316
+ detail, fn = detail
317
+ hash, blocks = detail["fn"], detail["blocks"]
318
+ if blocks is True:
319
+ # stored file is complete
320
+ logger.debug("Opening local copy of %s", path)
321
+ return open(fn, mode)
322
+ # TODO: action where partial file exists in read-only cache
323
+ logger.debug("Opening partially cached copy of %s", path)
324
+ else:
325
+ hash = self._mapper(path)
326
+ fn = os.path.join(self.storage[-1], hash)
327
+ blocks = set()
328
+ detail = {
329
+ "original": path,
330
+ "fn": hash,
331
+ "blocks": blocks,
332
+ "time": time.time(),
333
+ "uid": self.fs.ukey(path),
334
+ }
335
+ self._metadata.update_file(path, detail)
336
+ logger.debug("Creating local sparse file for %s", path)
337
+
338
+ # call target filesystems open
339
+ self._mkcache()
340
+ f = self.fs._open(
341
+ path,
342
+ mode=mode,
343
+ block_size=block_size,
344
+ autocommit=autocommit,
345
+ cache_options=cache_options,
346
+ cache_type="none",
347
+ **kwargs,
348
+ )
349
+ if self.compression:
350
+ comp = (
351
+ infer_compression(path)
352
+ if self.compression == "infer"
353
+ else self.compression
354
+ )
355
+ f = compr[comp](f, mode="rb")
356
+ if "blocksize" in detail:
357
+ if detail["blocksize"] != f.blocksize:
358
+ raise BlocksizeMismatchError(
359
+ f"Cached file must be reopened with same block"
360
+ f" size as original (old: {detail['blocksize']},"
361
+ f" new {f.blocksize})"
362
+ )
363
+ else:
364
+ detail["blocksize"] = f.blocksize
365
+ f.cache = MMapCache(f.blocksize, f._fetch_range, f.size, fn, blocks)
366
+ close = f.close
367
+ f.close = lambda: self.close_and_update(f, close)
368
+ self.save_cache()
369
+ return f
370
+
371
+ def _parent(self, path):
372
+ return self.fs._parent(path)
373
+
374
+ def hash_name(self, path: str, *args: Any) -> str:
375
+ # Kept for backward compatibility with downstream libraries.
376
+ # Ignores extra arguments, previously same_name boolean.
377
+ return self._mapper(path)
378
+
379
+ def close_and_update(self, f, close):
380
+ """Called when a file is closing, so store the set of blocks"""
381
+ if f.closed:
382
+ return
383
+ path = self._strip_protocol(f.path)
384
+ self._metadata.on_close_cached_file(f, path)
385
+ try:
386
+ logger.debug("going to save")
387
+ self.save_cache()
388
+ logger.debug("saved")
389
+ except OSError:
390
+ logger.debug("Cache saving failed while closing file")
391
+ except NameError:
392
+ logger.debug("Cache save failed due to interpreter shutdown")
393
+ close()
394
+ f.closed = True
395
+
396
+ def ls(self, path, detail=True):
397
+ return self.fs.ls(path, detail)
398
+
399
+ def __getattribute__(self, item):
400
+ if item in {
401
+ "load_cache",
402
+ "_open",
403
+ "save_cache",
404
+ "close_and_update",
405
+ "__init__",
406
+ "__getattribute__",
407
+ "__reduce__",
408
+ "_make_local_details",
409
+ "open",
410
+ "cat",
411
+ "cat_file",
412
+ "cat_ranges",
413
+ "get",
414
+ "read_block",
415
+ "tail",
416
+ "head",
417
+ "info",
418
+ "ls",
419
+ "exists",
420
+ "isfile",
421
+ "isdir",
422
+ "_check_file",
423
+ "_check_cache",
424
+ "_mkcache",
425
+ "clear_cache",
426
+ "clear_expired_cache",
427
+ "pop_from_cache",
428
+ "local_file",
429
+ "_paths_from_path",
430
+ "get_mapper",
431
+ "open_many",
432
+ "commit_many",
433
+ "hash_name",
434
+ "__hash__",
435
+ "__eq__",
436
+ "to_json",
437
+ "to_dict",
438
+ "cache_size",
439
+ "pipe_file",
440
+ "pipe",
441
+ "start_transaction",
442
+ "end_transaction",
443
+ }:
444
+ # all the methods defined in this class. Note `open` here, since
445
+ # it calls `_open`, but is actually in superclass
446
+ return lambda *args, **kw: getattr(type(self), item).__get__(self)(
447
+ *args, **kw
448
+ )
449
+ if item in ["__reduce_ex__"]:
450
+ raise AttributeError
451
+ if item in ["transaction"]:
452
+ # property
453
+ return type(self).transaction.__get__(self)
454
+ if item in ["_cache", "transaction_type"]:
455
+ # class attributes
456
+ return getattr(type(self), item)
457
+ if item == "__class__":
458
+ return type(self)
459
+ d = object.__getattribute__(self, "__dict__")
460
+ fs = d.get("fs", None) # fs is not immediately defined
461
+ if item in d:
462
+ return d[item]
463
+ elif fs is not None:
464
+ if item in fs.__dict__:
465
+ # attribute of instance
466
+ return fs.__dict__[item]
467
+ # attributed belonging to the target filesystem
468
+ cls = type(fs)
469
+ m = getattr(cls, item)
470
+ if (inspect.isfunction(m) or inspect.isdatadescriptor(m)) and (
471
+ not hasattr(m, "__self__") or m.__self__ is None
472
+ ):
473
+ # instance method
474
+ return m.__get__(fs, cls)
475
+ return m # class method or attribute
476
+ else:
477
+ # attributes of the superclass, while target is being set up
478
+ return super().__getattribute__(item)
479
+
480
+ def __eq__(self, other):
481
+ """Test for equality."""
482
+ if self is other:
483
+ return True
484
+ if not isinstance(other, type(self)):
485
+ return False
486
+ return (
487
+ self.storage == other.storage
488
+ and self.kwargs == other.kwargs
489
+ and self.cache_check == other.cache_check
490
+ and self.check_files == other.check_files
491
+ and self.expiry == other.expiry
492
+ and self.compression == other.compression
493
+ and self._mapper == other._mapper
494
+ and self.target_protocol == other.target_protocol
495
+ )
496
+
497
+ def __hash__(self):
498
+ """Calculate hash."""
499
+ return (
500
+ hash(tuple(self.storage))
501
+ ^ hash(str(self.kwargs))
502
+ ^ hash(self.cache_check)
503
+ ^ hash(self.check_files)
504
+ ^ hash(self.expiry)
505
+ ^ hash(self.compression)
506
+ ^ hash(self._mapper)
507
+ ^ hash(self.target_protocol)
508
+ )
509
+
510
+
511
class WholeFileCacheFileSystem(CachingFileSystem):
    """Caches whole remote files on first access

    This class is intended as a layer over any other file system, and
    will make a local copy of each file accessed, so that all subsequent
    reads are local. This is similar to ``CachingFileSystem``, but without
    the block-wise functionality and so can work even when sparse files
    are not allowed. See its docstring for definition of the init
    arguments.

    The class still needs access to the remote store for listing files,
    and may refresh cached files.
    """

    protocol = "filecache"
    local_file = True

    def open_many(self, open_files, **kwargs):
        """Open several files at once; for reads, download missing ones in one batch.

        In write mode, returns ``LocalTempFile`` objects that upload on commit.
        """
        paths = [of.path for of in open_files]
        if "r" in open_files.mode:
            self._mkcache()
        else:
            # write mode: hand back local temp files keyed into the cache dir
            return [
                LocalTempFile(
                    self.fs,
                    path,
                    mode=open_files.mode,
                    fn=os.path.join(self.storage[-1], self._mapper(path)),
                    **kwargs,
                )
                for path in paths
            ]

        if self.compression:
            raise NotImplementedError
        details = [self._check_file(sp) for sp in paths]
        downpath = [p for p, d in zip(paths, details) if not d]
        downfn0 = [
            os.path.join(self.storage[-1], self._mapper(p))
            for p, d in zip(paths, details)
        ]  # keep these path names for opening later
        downfn = [fn for fn, d in zip(downfn0, details) if not d]
        if downpath:
            # skip if all files are already cached and up to date
            self.fs.get(downpath, downfn)

            # update metadata - only happens when downloads are successful
            newdetail = [
                {
                    "original": path,
                    "fn": self._mapper(path),
                    "blocks": True,
                    "time": time.time(),
                    "uid": self.fs.ukey(path),
                }
                for path in downpath
            ]
            for path, detail in zip(downpath, newdetail):
                self._metadata.update_file(path, detail)
            self.save_cache()

        def firstpart(fn):
            # helper to adapt both whole-file and simple-cache
            return fn[1] if isinstance(fn, tuple) else fn

        # fn0 (from _check_file) wins for already-cached entries,
        # fn1 is the freshly downloaded location otherwise
        return [
            open(firstpart(fn0) if fn0 else fn1, mode=open_files.mode)
            for fn0, fn1 in zip(details, downfn0)
        ]

    def commit_many(self, open_files):
        """Upload a batch of written temp files to their remote targets, then clean up."""
        self.fs.put([f.fn for f in open_files], [f.path for f in open_files])
        [f.close() for f in open_files]
        for f in open_files:
            # in case autocommit is off, and so close did not already delete
            try:
                os.remove(f.name)
            except FileNotFoundError:
                pass
        # invalidate cached size total; it will be recomputed lazily
        self._cache_size = None

    def _make_local_details(self, path):
        """Register cache metadata for ``path`` and return the local filename to use."""
        hash = self._mapper(path)
        fn = os.path.join(self.storage[-1], hash)
        detail = {
            "original": path,
            "fn": hash,
            "blocks": True,  # whole file cached, not block-wise
            "time": time.time(),
            "uid": self.fs.ukey(path),
        }
        self._metadata.update_file(path, detail)
        logger.debug("Copying %s to local cache", path)
        return fn

    def cat(
        self,
        path,
        recursive=False,
        on_error="raise",
        callback=DEFAULT_CALLBACK,
        **kwargs,
    ):
        """Fetch (and cache) potentially multiple paths' contents as bytes.

        Missing files are downloaded in one batch; errors are raised or
        returned per ``on_error``.
        """
        paths = self.expand_path(
            path, recursive=recursive, maxdepth=kwargs.get("maxdepth")
        )
        getpaths = []
        storepaths = []
        fns = []
        out = {}
        # iterate over a copy, since failing paths are removed from `paths`
        for p in paths.copy():
            try:
                detail = self._check_file(p)
                if not detail:
                    # not cached yet: schedule for batch download
                    fn = self._make_local_details(p)
                    getpaths.append(p)
                    storepaths.append(fn)
                else:
                    # _check_file may return (detail, fn) or just fn
                    detail, fn = detail if isinstance(detail, tuple) else (None, detail)
                fns.append(fn)
            except Exception as e:
                if on_error == "raise":
                    raise
                if on_error == "return":
                    out[p] = e
                paths.remove(p)

        if getpaths:
            self.fs.get(getpaths, storepaths)
            self.save_cache()

        callback.set_size(len(paths))
        for p, fn in zip(paths, fns):
            with open(fn, "rb") as f:
                out[p] = f.read()
            callback.relative_update(1)
        # single non-recursive string input: return the bytes directly
        if isinstance(path, str) and len(paths) == 1 and recursive is False:
            out = out[paths[0]]
        return out

    def _open(self, path, mode="rb", **kwargs):
        """Open ``path``, downloading the whole file into the cache first if needed."""
        path = self._strip_protocol(path)
        if "r" not in mode:
            # write mode: local temp file, uploaded on commit
            hash = self._mapper(path)
            fn = os.path.join(self.storage[-1], hash)
            user_specified_kwargs = {
                k: v
                for k, v in kwargs.items()
                # those kwargs were added by open(), we don't want them
                if k not in ["autocommit", "block_size", "cache_options"]
            }
            return LocalTempFile(self, path, mode=mode, fn=fn, **user_specified_kwargs)
        detail = self._check_file(path)
        if detail:
            detail, fn = detail
            _, blocks = detail["fn"], detail["blocks"]
            if blocks is True:
                logger.debug("Opening local copy of %s", path)

                # In order to support downstream filesystems to be able to
                # infer the compression from the original filename, like
                # the `TarFileSystem`, let's extend the `io.BufferedReader`
                # fileobject protocol by adding a dedicated attribute
                # `original`.
                f = open(fn, mode)
                f.original = detail.get("original")
                return f
            else:
                # entry was created by the block-wise CachingFileSystem
                raise ValueError(
                    f"Attempt to open partially cached file {path}"
                    f" as a wholly cached file"
                )
        else:
            fn = self._make_local_details(path)
        kwargs["mode"] = mode

        # call target filesystems open
        self._mkcache()
        if self.compression:
            # download and decompress into the cache file
            with self.fs._open(path, **kwargs) as f, open(fn, "wb") as f2:
                if isinstance(f, AbstractBufferedFile):
                    # want no type of caching if just downloading whole thing
                    f.cache = BaseCache(0, f.cache.fetcher, f.size)
                comp = (
                    infer_compression(path)
                    if self.compression == "infer"
                    else self.compression
                )
                f = compr[comp](f, mode="rb")
                data = True
                while data:
                    block = getattr(f, "blocksize", 5 * 2**20)
                    data = f.read(block)
                    f2.write(data)
        else:
            self.fs.get_file(path, fn)
        self.save_cache()
        # recurse: the file is now cached, so this hits the fast path above
        return self._open(path, mode)
709
+
710
+
711
class SimpleCacheFileSystem(WholeFileCacheFileSystem):
    """Caches whole remote files on first access

    This class is intended as a layer over any other file system, and
    will make a local copy of each file accessed, so that all subsequent
    reads are local. This implementation only copies whole files, and
    does not keep any metadata about the download time or file details.
    It is therefore safer to use in multi-threaded/concurrent situations.

    This is the only of the caching filesystems that supports write: you will
    be given a real local open file, and upon close and commit, it will be
    uploaded to the target filesystem; the writability or the target URL is
    not checked until that time.

    """

    protocol = "simplecache"
    local_file = True
    transaction_type = WriteCachedTransaction

    def __init__(self, **kwargs):
        """Create the cache layer; metadata-related options are forced off."""
        kw = kwargs.copy()
        # this subclass keeps no metadata, so these checks are meaningless
        for key in ["cache_check", "expiry_time", "check_files"]:
            kw[key] = False
        super().__init__(**kw)
        for storage in self.storage:
            if not os.path.exists(storage):
                os.makedirs(storage, exist_ok=True)

    def _check_file(self, path):
        """Return the local cached filename for ``path``, or None if not cached."""
        self._check_cache()
        sha = self._mapper(path)
        for storage in self.storage:
            fn = os.path.join(storage, sha)
            if os.path.exists(fn):
                return fn

    def save_cache(self):
        # no metadata to persist in this implementation
        pass

    def load_cache(self):
        # no metadata to load in this implementation
        pass

    def pipe_file(self, path, value=None, **kwargs):
        """Write ``value`` to ``path``; deferred to transaction commit if in one."""
        if self._intrans:
            with self.open(path, "wb") as f:
                f.write(value)
        else:
            super().pipe_file(path, value)

    def ls(self, path, detail=True, **kwargs):
        """List remote contents, merged with files pending in an open transaction."""
        path = self._strip_protocol(path)
        details = []
        try:
            details = self.fs.ls(
                path, detail=True, **kwargs
            ).copy()  # don't edit original!
        except FileNotFoundError as e:
            # defer the error: transaction files below may still match
            ex = e
        else:
            ex = None
        if self._intrans:
            path1 = path.rstrip("/") + "/"
            for f in self.transaction.files:
                if f.path == path:
                    details.append(
                        {"name": path, "size": f.size or f.tell(), "type": "file"}
                    )
                elif f.path.startswith(path1):
                    if f.path.count("/") == path1.count("/"):
                        # direct child of the listed directory
                        details.append(
                            {"name": f.path, "size": f.size or f.tell(), "type": "file"}
                        )
                    else:
                        # deeper descendant: report the intermediate directory
                        dname = "/".join(f.path.split("/")[: path1.count("/") + 1])
                        details.append({"name": dname, "size": 0, "type": "directory"})
        if ex is not None and not details:
            raise ex
        if detail:
            return details
        return sorted(_["name"] for _ in details)

    def info(self, path, **kwargs):
        """Info for ``path``, checking transaction-pending files before the remote."""
        path = self._strip_protocol(path)
        if self._intrans:
            f = [_ for _ in self.transaction.files if _.path == path]
            if f:
                # closed file: size on disk; open file: current write offset
                size = os.path.getsize(f[0].fn) if f[0].closed else f[0].tell()
                return {"name": path, "size": size, "type": "file"}
            f = any(_.path.startswith(path + "/") for _ in self.transaction.files)
            if f:
                return {"name": path, "size": 0, "type": "directory"}
        return self.fs.info(path, **kwargs)

    def pipe(self, path, value=None, **kwargs):
        """Write one path (str) or many (dict of path->bytes) via ``pipe_file``."""
        if isinstance(path, str):
            self.pipe_file(self._strip_protocol(path), value, **kwargs)
        elif isinstance(path, dict):
            for k, v in path.items():
                self.pipe_file(self._strip_protocol(k), v, **kwargs)
        else:
            raise ValueError("path must be str or dict")

    def cat_ranges(
        self, paths, starts, ends, max_gap=None, on_error="return", **kwargs
    ):
        # NOTE(review): _check_file above returns a filename or None, never
        # False, so both `is False` filters always yield empty lists and no
        # pre-download happens here — confirm whether `if not l` was intended.
        lpaths = [self._check_file(p) for p in paths]
        rpaths = [p for l, p in zip(lpaths, paths) if l is False]
        lpaths = [l for l, p in zip(lpaths, paths) if l is False]
        self.fs.get(rpaths, lpaths)
        return super().cat_ranges(
            paths, starts, ends, max_gap=max_gap, on_error=on_error, **kwargs
        )

    def _open(self, path, mode="rb", **kwargs):
        """Open ``path``; download whole file into the cache first when reading."""
        path = self._strip_protocol(path)
        sha = self._mapper(path)

        if "r" not in mode:
            # write mode: local temp file, uploaded on commit
            fn = os.path.join(self.storage[-1], sha)
            user_specified_kwargs = {
                k: v
                for k, v in kwargs.items()
                if k not in ["autocommit", "block_size", "cache_options"]
            }  # those were added by open()
            return LocalTempFile(
                self,
                path,
                mode=mode,
                autocommit=not self._intrans,
                fn=fn,
                **user_specified_kwargs,
            )
        fn = self._check_file(path)
        if fn:
            return open(fn, mode)

        fn = os.path.join(self.storage[-1], sha)
        logger.debug("Copying %s to local cache", path)
        kwargs["mode"] = mode

        self._mkcache()
        # invalidate cached size total; it will be recomputed lazily
        self._cache_size = None
        if self.compression:
            # download and decompress into the cache file
            with self.fs._open(path, **kwargs) as f, open(fn, "wb") as f2:
                if isinstance(f, AbstractBufferedFile):
                    # want no type of caching if just downloading whole thing
                    f.cache = BaseCache(0, f.cache.fetcher, f.size)
                comp = (
                    infer_compression(path)
                    if self.compression == "infer"
                    else self.compression
                )
                f = compr[comp](f, mode="rb")
                data = True
                while data:
                    block = getattr(f, "blocksize", 5 * 2**20)
                    data = f.read(block)
                    f2.write(data)
        else:
            self.fs.get_file(path, fn)
        # recurse: the file is now cached, so this hits the fast path above
        return self._open(path, mode)
873
+
874
+
875
class LocalTempFile:
    """A temporary local file, which will be uploaded on commit"""

    def __init__(self, fs, path, fn, mode="wb", autocommit=True, seek=0, **kwargs):
        # All reads/writes are delegated to this open handle via __getattr__.
        self.fs = fs
        self.path = path
        self.fn = fn
        self.mode = mode
        self.autocommit = autocommit
        self.kwargs = kwargs
        self.size = None
        self.closed = False
        self.fh = open(fn, mode)
        if seek:
            self.fh.seek(seek)

    def __reduce__(self):
        # always open in r+b to allow continuing writing at a location
        state = (self.fs, self.path, self.fn, "r+b", self.autocommit, self.tell())
        return (LocalTempFile, state)

    def __enter__(self):
        # the context manager yields the raw handle, not this wrapper
        return self.fh

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()

    def close(self):
        """Close the local handle; trigger upload when autocommit is on."""
        if self.closed:
            return
        self.fh.close()
        self.closed = True
        if self.autocommit:
            self.commit()

    def discard(self):
        """Abandon the pending upload and remove the local scratch file."""
        self.fh.close()
        os.remove(self.fn)

    def commit(self):
        """Upload the local file to its target path on the wrapped filesystem."""
        self.fs.put(self.fn, self.path, **self.kwargs)
        # we do not delete local copy - it's still in the cache

    @property
    def name(self):
        return self.fn

    def __repr__(self) -> str:
        return f"LocalTempFile: {self.path}"

    def __getattr__(self, item):
        # everything else behaves like the underlying file object
        return getattr(self.fh, item)
mgm/lib/python3.10/site-packages/fsspec/implementations/dask.py ADDED
@@ -0,0 +1,152 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import dask
2
+ from distributed.client import Client, _get_global_client
3
+ from distributed.worker import Worker
4
+
5
+ from fsspec import filesystem
6
+ from fsspec.spec import AbstractBufferedFile, AbstractFileSystem
7
+ from fsspec.utils import infer_storage_options
8
+
9
+
10
def _get_client(client):
    """Resolve ``client`` to a ``distributed.Client``.

    ``None`` means the current global client; a ``Client`` instance is
    passed through unchanged; anything else (e.g. a scheduler address
    string) is used to construct a new client.
    """
    if client is None:
        return _get_global_client()
    if isinstance(client, Client):
        return client
    # e.g., connection string
    return Client(client)
18
+
19
+
20
def _in_worker():
    """Whether this process is currently running a Dask ``Worker``."""
    return len(Worker._instances) > 0
22
+
23
+
24
class DaskWorkerFileSystem(AbstractFileSystem):
    """View files accessible to a worker as any other remote file-system

    When instances are run on the worker, uses the real filesystem. When
    run on the client, they call the worker to provide information or data.

    **Warning** this implementation is experimental, and read-only for now.
    """

    def __init__(
        self, target_protocol=None, target_options=None, fs=None, client=None, **kwargs
    ):
        """Wrap either an instantiated filesystem (``fs``) or one described by
        ``target_protocol``/``target_options`` — exactly one of the two."""
        super().__init__(**kwargs)
        # XOR: require exactly one of fs / target_protocol
        if not (fs is None) ^ (target_protocol is None):
            raise ValueError(
                "Please provide one of filesystem instance (fs) or"
                " target_protocol, not both"
            )
        self.target_protocol = target_protocol
        self.target_options = target_options
        self.worker = None
        self.client = client
        self.fs = fs
        self._determine_worker()

    @staticmethod
    def _get_kwargs_from_urls(path):
        """Extract a scheduler address (host:port) from a URL as the client kwarg."""
        so = infer_storage_options(path)
        if "host" in so and "port" in so:
            return {"client": f"{so['host']}:{so['port']}"}
        else:
            return {}

    def _determine_worker(self):
        """Decide once whether we run on a worker (use the real filesystem
        directly) or on the client (proxy every call via ``dask.delayed``)."""
        if _in_worker():
            self.worker = True
            if self.fs is None:
                self.fs = filesystem(
                    self.target_protocol, **(self.target_options or {})
                )
        else:
            self.worker = False
            self.client = _get_client(self.client)
            # delayed proxy of self: method calls on it execute on a worker
            self.rfs = dask.delayed(self)

    def mkdir(self, *args, **kwargs):
        # local on worker; otherwise dispatch to a worker and block on compute
        if self.worker:
            self.fs.mkdir(*args, **kwargs)
        else:
            self.rfs.mkdir(*args, **kwargs).compute()

    def rm(self, *args, **kwargs):
        if self.worker:
            self.fs.rm(*args, **kwargs)
        else:
            self.rfs.rm(*args, **kwargs).compute()

    def copy(self, *args, **kwargs):
        if self.worker:
            self.fs.copy(*args, **kwargs)
        else:
            self.rfs.copy(*args, **kwargs).compute()

    def mv(self, *args, **kwargs):
        if self.worker:
            self.fs.mv(*args, **kwargs)
        else:
            self.rfs.mv(*args, **kwargs).compute()

    def ls(self, *args, **kwargs):
        if self.worker:
            return self.fs.ls(*args, **kwargs)
        else:
            return self.rfs.ls(*args, **kwargs).compute()

    def _open(
        self,
        path,
        mode="rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        """On the worker, open the real file; on the client, return a
        ``DaskFile`` that fetches ranges through the worker."""
        if self.worker:
            return self.fs._open(
                path,
                mode=mode,
                block_size=block_size,
                autocommit=autocommit,
                cache_options=cache_options,
                **kwargs,
            )
        else:
            return DaskFile(
                fs=self,
                path=path,
                mode=mode,
                block_size=block_size,
                autocommit=autocommit,
                cache_options=cache_options,
                **kwargs,
            )

    def fetch_range(self, path, mode, start, end):
        """Read bytes [start, end) of ``path``; remoted via delayed on the client."""
        if self.worker:
            with self._open(path, mode) as f:
                f.seek(start)
                return f.read(end - start)
        else:
            return self.rfs.fetch_range(path, mode, start, end).compute()
135
+
136
+
137
class DaskFile(AbstractBufferedFile):
    """Client-side file object that pulls byte ranges through a Dask worker."""

    def __init__(self, mode="rb", **kwargs):
        # writing through the client is not supported
        if mode != "rb":
            raise ValueError('Remote dask files can only be opened in "rb" mode')
        super().__init__(**kwargs)

    def _upload_chunk(self, final=False):
        # read-only: nothing to upload
        pass

    def _initiate_upload(self):
        """Create remote file/upload"""
        # read-only: nothing to initiate
        pass

    def _fetch_range(self, start, end):
        """Get the specified set of bytes from remote"""
        return self.fs.fetch_range(self.path, self.mode, start, end)
mgm/lib/python3.10/site-packages/fsspec/implementations/data.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import base64
2
+ import io
3
+ from typing import Optional
4
+ from urllib.parse import unquote
5
+
6
+ from fsspec import AbstractFileSystem
7
+
8
+
9
class DataFileSystem(AbstractFileSystem):
    """A handy decoder for data-URLs

    Example
    -------
    >>> with fsspec.open("data:,Hello%2C%20World%21") as f:
    ...     print(f.read())
    b"Hello, World!"

    See https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URLs
    """

    protocol = "data"

    def __init__(self, **kwargs):
        """No parameters for this filesystem"""
        super().__init__(**kwargs)

    def cat_file(self, path, start=None, end=None, **kwargs):
        """Decode the payload of a data-URL, optionally sliced to [start:end]."""
        header, payload = path.split(",", 1)
        if header.endswith("base64"):
            decoded = base64.b64decode(payload)
        else:
            # percent-encoded text payload
            decoded = unquote(payload).encode()
        return decoded[start:end]

    def info(self, path, **kwargs):
        """Describe the URL: decoded size plus the declared MIME type."""
        header, name = path.split(",", 1)
        payload = self.cat_file(path)
        # "data:<mime>[;...]," -> take what is between ':' and the first ';'
        mime = header.split(":", 1)[1].split(";", 1)[0]
        return {"name": name, "size": len(payload), "type": "file", "mimetype": mime}

    def _open(
        self,
        path,
        mode="rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        """Return an in-memory file over the decoded payload (read-only)."""
        if "r" not in mode:
            raise ValueError("Read only filesystem")
        return io.BytesIO(self.cat_file(path))

    @staticmethod
    def encode(data: bytes, mime: Optional[str] = None):
        """Format the given data into data-URL syntax

        This version always base64 encodes, even when the data is ascii/url-safe.
        """
        body = base64.b64encode(data).decode()
        return f"data:{mime or ''};base64,{body}"
mgm/lib/python3.10/site-packages/fsspec/implementations/dbfs.py ADDED
@@ -0,0 +1,467 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import base64
2
+ import urllib
3
+
4
+ import requests
5
+ import requests.exceptions
6
+ from requests.adapters import HTTPAdapter, Retry
7
+
8
+ from fsspec import AbstractFileSystem
9
+ from fsspec.spec import AbstractBufferedFile
10
+
11
+
12
class DatabricksException(Exception):
    """Error reported by the DBFS REST API, carrying the API's error code."""

    def __init__(self, error_code, message):
        """Store the API ``error_code`` alongside the human-readable ``message``."""
        super().__init__(message)
        self.error_code = error_code
        self.message = message
23
+
24
+
25
class DatabricksFileSystem(AbstractFileSystem):
    """
    Get access to the Databricks filesystem implementation over HTTP.
    Can be used inside and outside of a databricks cluster.
    """

    def __init__(self, instance, token, **kwargs):
        """
        Create a new DatabricksFileSystem.

        Parameters
        ----------
        instance: str
            The instance URL of the databricks cluster.
            For example for an Azure databricks cluster, this
            has the form adb-<some-number>.<two digits>.azuredatabricks.net.
        token: str
            Your personal token. Find out more
            here: https://docs.databricks.com/dev-tools/api/latest/authentication.html
        """
        self.instance = instance
        self.token = token
        self.session = requests.Session()
        # retry transient HTTP failures (throttling/gateway errors) with backoff
        self.retries = Retry(
            total=10,
            backoff_factor=0.05,
            status_forcelist=[408, 429, 500, 502, 503, 504],
        )

        self.session.mount("https://", HTTPAdapter(max_retries=self.retries))
        self.session.headers.update({"Authorization": f"Bearer {self.token}"})

        super().__init__(**kwargs)

    def ls(self, path, detail=True, **kwargs):
        """
        List the contents of the given path.

        Parameters
        ----------
        path: str
            Absolute path
        detail: bool
            Return not only the list of filenames,
            but also additional information on file sizes
            and types.
        """
        # serve from the directory cache when possible
        out = self._ls_from_cache(path)
        if not out:
            try:
                r = self._send_to_api(
                    method="get", endpoint="list", json={"path": path}
                )
            except DatabricksException as e:
                if e.error_code == "RESOURCE_DOES_NOT_EXIST":
                    raise FileNotFoundError(e.message) from e

                raise
            files = r["files"]
            out = [
                {
                    "name": o["path"],
                    "type": "directory" if o["is_dir"] else "file",
                    "size": o["file_size"],
                }
                for o in files
            ]
            self.dircache[path] = out

        if detail:
            return out
        return [o["name"] for o in out]

    def makedirs(self, path, exist_ok=True):
        """
        Create a given absolute path and all of its parents.

        Parameters
        ----------
        path: str
            Absolute path to create
        exist_ok: bool
            If false, checks if the folder
            exists before creating it (and raises an
            Exception if this is the case)
        """
        if not exist_ok:
            try:
                # If the following succeeds, the path is already present
                self._send_to_api(
                    method="get", endpoint="get-status", json={"path": path}
                )
                raise FileExistsError(f"Path {path} already exists")
            except DatabricksException as e:
                # NOTE(review): API errors other than RESOURCE_DOES_NOT_EXIST
                # are also swallowed here (no re-raise branch) — confirm intended.
                if e.error_code == "RESOURCE_DOES_NOT_EXIST":
                    pass

        try:
            self._send_to_api(method="post", endpoint="mkdirs", json={"path": path})
        except DatabricksException as e:
            if e.error_code == "RESOURCE_ALREADY_EXISTS":
                raise FileExistsError(e.message) from e

            raise
        self.invalidate_cache(self._parent(path))

    def mkdir(self, path, create_parents=True, **kwargs):
        """
        Create a given absolute path and all of its parents.

        Parameters
        ----------
        path: str
            Absolute path to create
        create_parents: bool
            Whether to create all parents or not.
            "False" is not implemented so far.
        """
        if not create_parents:
            raise NotImplementedError

        self.mkdirs(path, **kwargs)

    def rm(self, path, recursive=False, **kwargs):
        """
        Remove the file or folder at the given absolute path.

        Parameters
        ----------
        path: str
            Absolute path what to remove
        recursive: bool
            Recursively delete all files in a folder.
        """
        try:
            self._send_to_api(
                method="post",
                endpoint="delete",
                json={"path": path, "recursive": recursive},
            )
        except DatabricksException as e:
            # This is not really an exception, it just means
            # not everything was deleted so far
            if e.error_code == "PARTIAL_DELETE":
                # NOTE(review): after this retry completes, control falls
                # through to the unconditional `raise` below, re-raising the
                # original PARTIAL_DELETE even on success — confirm intended.
                self.rm(path=path, recursive=recursive)
            elif e.error_code == "IO_ERROR":
                # Using the same exception as the os module would use here
                raise OSError(e.message) from e

            raise
        self.invalidate_cache(self._parent(path))

    def mv(
        self, source_path, destination_path, recursive=False, maxdepth=None, **kwargs
    ):
        """
        Move a source to a destination path.

        A note from the original [databricks API manual]
        (https://docs.databricks.com/dev-tools/api/latest/dbfs.html#move).

        When moving a large number of files the API call will time out after
        approximately 60s, potentially resulting in partially moved data.
        Therefore, for operations that move more than 10k files, we strongly
        discourage using the DBFS REST API.

        Parameters
        ----------
        source_path: str
            From where to move (absolute path)
        destination_path: str
            To where to move (absolute path)
        recursive: bool
            Not implemented to far.
        maxdepth:
            Not implemented to far.
        """
        if recursive:
            raise NotImplementedError
        if maxdepth:
            raise NotImplementedError

        try:
            self._send_to_api(
                method="post",
                endpoint="move",
                json={"source_path": source_path, "destination_path": destination_path},
            )
        except DatabricksException as e:
            if e.error_code == "RESOURCE_DOES_NOT_EXIST":
                raise FileNotFoundError(e.message) from e
            elif e.error_code == "RESOURCE_ALREADY_EXISTS":
                raise FileExistsError(e.message) from e

            raise
        self.invalidate_cache(self._parent(source_path))
        self.invalidate_cache(self._parent(destination_path))

    def _open(self, path, mode="rb", block_size="default", **kwargs):
        """
        Overwrite the base class method to make sure to create a DBFile.
        All arguments are copied from the base method.

        Only the default blocksize is allowed.
        """
        return DatabricksFile(self, path, mode=mode, block_size=block_size, **kwargs)

    def _send_to_api(self, method, endpoint, json):
        """
        Send the given json to the DBFS API
        using a get or post request (specified by the argument `method`).

        Parameters
        ----------
        method: str
            Which http method to use for communication; "get" or "post".
        endpoint: str
            Where to send the request to (last part of the API URL)
        json: dict
            Dictionary of information to send
        """
        if method == "post":
            session_call = self.session.post
        elif method == "get":
            session_call = self.session.get
        else:
            raise ValueError(f"Do not understand method {method}")

        url = urllib.parse.urljoin(f"https://{self.instance}/api/2.0/dbfs/", endpoint)

        r = session_call(url, json=json)

        # The DBFS API will return a json, also in case of an exception.
        # We want to preserve this information as good as possible.
        try:
            r.raise_for_status()
        except requests.HTTPError as e:
            # try to extract json error message
            # if that fails, fall back to the original exception
            try:
                exception_json = e.response.json()
            except Exception:
                raise e from None

            raise DatabricksException(**exception_json) from e

        return r.json()

    def _create_handle(self, path, overwrite=True):
        """
        Internal function to create a handle, which can be used to
        write blocks of a file to DBFS.
        A handle has a unique identifier which needs to be passed
        whenever written during this transaction.
        The handle is active for 10 minutes - after that a new
        write transaction needs to be created.
        Make sure to close the handle after you are finished.

        Parameters
        ----------
        path: str
            Absolute path for this file.
        overwrite: bool
            If a file already exist at this location, either overwrite
            it or raise an exception.
        """
        try:
            r = self._send_to_api(
                method="post",
                endpoint="create",
                json={"path": path, "overwrite": overwrite},
            )
            return r["handle"]
        except DatabricksException as e:
            if e.error_code == "RESOURCE_ALREADY_EXISTS":
                raise FileExistsError(e.message) from e

            raise

    def _close_handle(self, handle):
        """
        Close a handle, which was opened by :func:`_create_handle`.

        Parameters
        ----------
        handle: str
            Which handle to close.
        """
        try:
            self._send_to_api(method="post", endpoint="close", json={"handle": handle})
        except DatabricksException as e:
            if e.error_code == "RESOURCE_DOES_NOT_EXIST":
                raise FileNotFoundError(e.message) from e

            raise

    def _add_data(self, handle, data):
        """
        Upload data to an already opened file handle
        (opened by :func:`_create_handle`).
        The maximal allowed data size is 1MB after
        conversion to base64.
        Remember to close the handle when you are finished.

        Parameters
        ----------
        handle: str
            Which handle to upload data to.
        data: bytes
            Block of data to add to the handle.
        """
        # the API transports blocks as base64 text
        data = base64.b64encode(data).decode()
        try:
            self._send_to_api(
                method="post",
                endpoint="add-block",
                json={"handle": handle, "data": data},
            )
        except DatabricksException as e:
            if e.error_code == "RESOURCE_DOES_NOT_EXIST":
                raise FileNotFoundError(e.message) from e
            elif e.error_code == "MAX_BLOCK_SIZE_EXCEEDED":
                raise ValueError(e.message) from e

            raise

    def _get_data(self, path, start, end):
        """
        Download data in bytes from a given absolute path in a block
        from [start, start+length].
        The maximum number of allowed bytes to read is 1MB.

        Parameters
        ----------
        path: str
            Absolute path to download data from
        start: int
            Start position of the block
        end: int
            End position of the block
        """
        try:
            r = self._send_to_api(
                method="get",
                endpoint="read",
                json={"path": path, "offset": start, "length": end - start},
            )
            return base64.b64decode(r["data"])
        except DatabricksException as e:
            if e.error_code == "RESOURCE_DOES_NOT_EXIST":
                raise FileNotFoundError(e.message) from e
            elif e.error_code in ["INVALID_PARAMETER_VALUE", "MAX_READ_SIZE_EXCEEDED"]:
                raise ValueError(e.message) from e

            raise

    def invalidate_cache(self, path=None):
        """Drop cached listings for ``path`` (or everything when path is None)."""
        if path is None:
            self.dircache.clear()
        else:
            self.dircache.pop(path, None)
        super().invalidate_cache(path)
387
+
388
+
389
class DatabricksFile(AbstractBufferedFile):
    """
    Helper class for files referenced in the DatabricksFileSystem.
    """

    DEFAULT_BLOCK_SIZE = 1 * 2**20  # only allowed block size

    def __init__(
        self,
        fs,
        path,
        mode="rb",
        block_size="default",
        autocommit=True,
        cache_type="readahead",
        cache_options=None,
        **kwargs,
    ):
        """
        Create a new instance of the DatabricksFile.

        The blocksize needs to be the default one.
        """
        if block_size is None or block_size == "default":
            block_size = self.DEFAULT_BLOCK_SIZE

        assert (
            block_size == self.DEFAULT_BLOCK_SIZE
        ), f"Only the default block size is allowed, not {block_size}"

        super().__init__(
            fs,
            path,
            mode=mode,
            block_size=block_size,
            autocommit=autocommit,
            cache_type=cache_type,
            cache_options=cache_options or {},
            **kwargs,
        )

    def _initiate_upload(self):
        """Open a DBFS write handle for this upload."""
        self.handle = self.fs._create_handle(self.path)

    def _upload_chunk(self, final=False):
        """Push the write buffer to the open handle in block-sized pieces."""
        self.buffer.seek(0)
        payload = self.buffer.getvalue()

        for lo, hi in self._to_sized_blocks(len(payload)):
            self.fs._add_data(handle=self.handle, data=payload[lo:hi])

        if final:
            self.fs._close_handle(handle=self.handle)
            return True

    def _fetch_range(self, start, end):
        """Download the byte range [start, end) in block-sized pieces."""
        pieces = [
            self.fs._get_data(path=self.path, start=lo, end=hi)
            for lo, hi in self._to_sized_blocks(end - start, start)
        ]
        return b"".join(pieces)

    def _to_sized_blocks(self, length, start=0):
        """Yield (start, end) pairs covering ``length`` bytes in blocksize steps."""
        stop = start + length
        lo = start
        while lo < stop:
            yield lo, min(stop, lo + self.blocksize)
            lo += self.blocksize
mgm/lib/python3.10/site-packages/fsspec/implementations/dirfs.py ADDED
@@ -0,0 +1,384 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .. import filesystem
2
+ from ..asyn import AsyncFileSystem
3
+
4
+
5
class DirFileSystem(AsyncFileSystem):
    """Directory prefix filesystem

    The DirFileSystem is a filesystem-wrapper. It assumes every path it is dealing with
    is relative to the `path`. After performing the necessary paths operation it
    delegates everything to the wrapped filesystem.
    """

    protocol = "dir"

    def __init__(
        self,
        path=None,
        fs=None,
        fo=None,
        target_protocol=None,
        target_options=None,
        **storage_options,
    ):
        """
        Parameters
        ----------
        path: str
            Path to the directory.
        fs: AbstractFileSystem
            An instantiated filesystem to wrap.
        target_protocol, target_options:
            if fs is none, construct it from these
        fo: str
            Alternate for path; do not provide both
        """
        super().__init__(**storage_options)
        if fs is None:
            fs = filesystem(protocol=target_protocol, **(target_options or {}))
        # Exactly one of path/fo must be supplied (XOR); the `is False`
        # comparison works because both operands of ^ are bools here.
        if (path is not None) ^ (fo is not None) is False:
            raise ValueError("Provide path or fo, not both")
        path = path or fo

        if self.asynchronous and not fs.async_impl:
            raise ValueError("can't use asynchronous with non-async fs")

        if fs.async_impl and self.asynchronous != fs.asynchronous:
            raise ValueError("both dirfs and fs should be in the same sync/async mode")

        self.path = fs._strip_protocol(path)
        self.fs = fs

    def _join(self, path):
        # Prepend the directory prefix to a path; also maps over lists and
        # (for dict inputs, e.g. pipe()) over the keys of a dict.
        if isinstance(path, str):
            if not self.path:
                return path
            if not path:
                return self.path
            return self.fs.sep.join((self.path, self._strip_protocol(path)))
        if isinstance(path, dict):
            return {self._join(_path): value for _path, value in path.items()}
        return [self._join(_path) for _path in path]

    def _relpath(self, path):
        # Strip the directory prefix from a path (or each path in a list),
        # inverting _join for values returned by the wrapped filesystem.
        if isinstance(path, str):
            if not self.path:
                return path
            # We need to account for S3FileSystem returning paths that do not
            # start with a '/'
            if path == self.path or (
                self.path.startswith(self.fs.sep) and path == self.path[1:]
            ):
                return ""
            prefix = self.path + self.fs.sep
            if self.path.startswith(self.fs.sep) and not path.startswith(self.fs.sep):
                prefix = prefix[1:]
            assert path.startswith(prefix)
            return path[len(prefix) :]
        return [self._relpath(_path) for _path in path]

    # Wrappers below
    #
    # NOTE: every wrapper follows the same pattern: join the prefix onto
    # incoming paths, delegate to the wrapped filesystem, and, where paths
    # appear in the return value (cat/ls/glob/du/find/walk/expand_path),
    # strip the prefix again. The ``_``-prefixed coroutine and the plain
    # sync method of each pair mirror each other exactly.

    @property
    def sep(self):
        return self.fs.sep

    async def set_session(self, *args, **kwargs):
        return await self.fs.set_session(*args, **kwargs)

    async def _rm_file(self, path, **kwargs):
        return await self.fs._rm_file(self._join(path), **kwargs)

    def rm_file(self, path, **kwargs):
        return self.fs.rm_file(self._join(path), **kwargs)

    async def _rm(self, path, *args, **kwargs):
        return await self.fs._rm(self._join(path), *args, **kwargs)

    def rm(self, path, *args, **kwargs):
        return self.fs.rm(self._join(path), *args, **kwargs)

    async def _cp_file(self, path1, path2, **kwargs):
        return await self.fs._cp_file(self._join(path1), self._join(path2), **kwargs)

    def cp_file(self, path1, path2, **kwargs):
        return self.fs.cp_file(self._join(path1), self._join(path2), **kwargs)

    async def _copy(
        self,
        path1,
        path2,
        *args,
        **kwargs,
    ):
        return await self.fs._copy(
            self._join(path1),
            self._join(path2),
            *args,
            **kwargs,
        )

    def copy(self, path1, path2, *args, **kwargs):
        return self.fs.copy(
            self._join(path1),
            self._join(path2),
            *args,
            **kwargs,
        )

    async def _pipe(self, path, *args, **kwargs):
        return await self.fs._pipe(self._join(path), *args, **kwargs)

    def pipe(self, path, *args, **kwargs):
        return self.fs.pipe(self._join(path), *args, **kwargs)

    async def _pipe_file(self, path, *args, **kwargs):
        return await self.fs._pipe_file(self._join(path), *args, **kwargs)

    def pipe_file(self, path, *args, **kwargs):
        return self.fs.pipe_file(self._join(path), *args, **kwargs)

    async def _cat_file(self, path, *args, **kwargs):
        return await self.fs._cat_file(self._join(path), *args, **kwargs)

    def cat_file(self, path, *args, **kwargs):
        return self.fs.cat_file(self._join(path), *args, **kwargs)

    async def _cat(self, path, *args, **kwargs):
        ret = await self.fs._cat(
            self._join(path),
            *args,
            **kwargs,
        )

        # Multi-path cat returns a dict keyed by path: re-relativize the keys.
        if isinstance(ret, dict):
            return {self._relpath(key): value for key, value in ret.items()}

        return ret

    def cat(self, path, *args, **kwargs):
        ret = self.fs.cat(
            self._join(path),
            *args,
            **kwargs,
        )

        if isinstance(ret, dict):
            return {self._relpath(key): value for key, value in ret.items()}

        return ret

    async def _put_file(self, lpath, rpath, **kwargs):
        return await self.fs._put_file(lpath, self._join(rpath), **kwargs)

    def put_file(self, lpath, rpath, **kwargs):
        return self.fs.put_file(lpath, self._join(rpath), **kwargs)

    async def _put(
        self,
        lpath,
        rpath,
        *args,
        **kwargs,
    ):
        return await self.fs._put(
            lpath,
            self._join(rpath),
            *args,
            **kwargs,
        )

    def put(self, lpath, rpath, *args, **kwargs):
        return self.fs.put(
            lpath,
            self._join(rpath),
            *args,
            **kwargs,
        )

    async def _get_file(self, rpath, lpath, **kwargs):
        return await self.fs._get_file(self._join(rpath), lpath, **kwargs)

    def get_file(self, rpath, lpath, **kwargs):
        return self.fs.get_file(self._join(rpath), lpath, **kwargs)

    async def _get(self, rpath, *args, **kwargs):
        return await self.fs._get(self._join(rpath), *args, **kwargs)

    def get(self, rpath, *args, **kwargs):
        return self.fs.get(self._join(rpath), *args, **kwargs)

    async def _isfile(self, path):
        return await self.fs._isfile(self._join(path))

    def isfile(self, path):
        return self.fs.isfile(self._join(path))

    async def _isdir(self, path):
        return await self.fs._isdir(self._join(path))

    def isdir(self, path):
        return self.fs.isdir(self._join(path))

    async def _size(self, path):
        return await self.fs._size(self._join(path))

    def size(self, path):
        return self.fs.size(self._join(path))

    async def _exists(self, path):
        return await self.fs._exists(self._join(path))

    def exists(self, path):
        return self.fs.exists(self._join(path))

    async def _info(self, path, **kwargs):
        return await self.fs._info(self._join(path), **kwargs)

    def info(self, path, **kwargs):
        return self.fs.info(self._join(path), **kwargs)

    async def _ls(self, path, detail=True, **kwargs):
        # Copy before mutating: the wrapped fs may return its dircache list.
        ret = (await self.fs._ls(self._join(path), detail=detail, **kwargs)).copy()
        if detail:
            out = []
            for entry in ret:
                entry = entry.copy()
                entry["name"] = self._relpath(entry["name"])
                out.append(entry)
            return out

        return self._relpath(ret)

    def ls(self, path, detail=True, **kwargs):
        ret = self.fs.ls(self._join(path), detail=detail, **kwargs).copy()
        if detail:
            out = []
            for entry in ret:
                entry = entry.copy()
                entry["name"] = self._relpath(entry["name"])
                out.append(entry)
            return out

        return self._relpath(ret)

    async def _walk(self, path, *args, **kwargs):
        async for root, dirs, files in self.fs._walk(self._join(path), *args, **kwargs):
            yield self._relpath(root), dirs, files

    def walk(self, path, *args, **kwargs):
        for root, dirs, files in self.fs.walk(self._join(path), *args, **kwargs):
            yield self._relpath(root), dirs, files

    async def _glob(self, path, **kwargs):
        detail = kwargs.get("detail", False)
        ret = await self.fs._glob(self._join(path), **kwargs)
        if detail:
            return {self._relpath(path): info for path, info in ret.items()}
        return self._relpath(ret)

    def glob(self, path, **kwargs):
        detail = kwargs.get("detail", False)
        ret = self.fs.glob(self._join(path), **kwargs)
        if detail:
            return {self._relpath(path): info for path, info in ret.items()}
        return self._relpath(ret)

    async def _du(self, path, *args, **kwargs):
        total = kwargs.get("total", True)
        ret = await self.fs._du(self._join(path), *args, **kwargs)
        if total:
            # total=True returns a single number, nothing to re-relativize
            return ret

        return {self._relpath(path): size for path, size in ret.items()}

    def du(self, path, *args, **kwargs):
        total = kwargs.get("total", True)
        ret = self.fs.du(self._join(path), *args, **kwargs)
        if total:
            return ret

        return {self._relpath(path): size for path, size in ret.items()}

    async def _find(self, path, *args, **kwargs):
        detail = kwargs.get("detail", False)
        ret = await self.fs._find(self._join(path), *args, **kwargs)
        if detail:
            return {self._relpath(path): info for path, info in ret.items()}
        return self._relpath(ret)

    def find(self, path, *args, **kwargs):
        detail = kwargs.get("detail", False)
        ret = self.fs.find(self._join(path), *args, **kwargs)
        if detail:
            return {self._relpath(path): info for path, info in ret.items()}
        return self._relpath(ret)

    async def _expand_path(self, path, *args, **kwargs):
        return self._relpath(
            await self.fs._expand_path(self._join(path), *args, **kwargs)
        )

    def expand_path(self, path, *args, **kwargs):
        return self._relpath(self.fs.expand_path(self._join(path), *args, **kwargs))

    async def _mkdir(self, path, *args, **kwargs):
        return await self.fs._mkdir(self._join(path), *args, **kwargs)

    def mkdir(self, path, *args, **kwargs):
        return self.fs.mkdir(self._join(path), *args, **kwargs)

    async def _makedirs(self, path, *args, **kwargs):
        return await self.fs._makedirs(self._join(path), *args, **kwargs)

    def makedirs(self, path, *args, **kwargs):
        return self.fs.makedirs(self._join(path), *args, **kwargs)

    def rmdir(self, path):
        return self.fs.rmdir(self._join(path))

    def mv(self, path1, path2, **kwargs):
        return self.fs.mv(
            self._join(path1),
            self._join(path2),
            **kwargs,
        )

    def touch(self, path, **kwargs):
        return self.fs.touch(self._join(path), **kwargs)

    def created(self, path):
        return self.fs.created(self._join(path))

    def modified(self, path):
        return self.fs.modified(self._join(path))

    def sign(self, path, *args, **kwargs):
        return self.fs.sign(self._join(path), *args, **kwargs)

    def __repr__(self):
        return f"{self.__class__.__qualname__}(path='{self.path}', fs={self.fs})"

    def open(
        self,
        path,
        *args,
        **kwargs,
    ):
        return self.fs.open(
            self._join(path),
            *args,
            **kwargs,
        )

    async def open_async(
        self,
        path,
        *args,
        **kwargs,
    ):
        return await self.fs.open_async(
            self._join(path),
            *args,
            **kwargs,
        )
mgm/lib/python3.10/site-packages/fsspec/implementations/ftp.py ADDED
@@ -0,0 +1,395 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import sys
3
+ import uuid
4
+ import warnings
5
+ from ftplib import FTP, FTP_TLS, Error, error_perm
6
+ from typing import Any
7
+
8
+ from ..spec import AbstractBufferedFile, AbstractFileSystem
9
+ from ..utils import infer_storage_options, isfilelike
10
+
11
+
12
class FTPFileSystem(AbstractFileSystem):
    """A filesystem over classic FTP"""

    root_marker = "/"
    # Not cachable: each instance owns a live FTP connection that cannot be
    # safely shared between logically distinct instantiations.
    cachable = False
    protocol = "ftp"

    def __init__(
        self,
        host,
        port=21,
        username=None,
        password=None,
        acct=None,
        block_size=None,
        tempdir=None,
        timeout=30,
        encoding="utf-8",
        tls=False,
        **kwargs,
    ):
        """
        You can use _get_kwargs_from_urls to get some kwargs from
        a reasonable FTP url.

        Authentication will be anonymous if username/password are not
        given.

        Parameters
        ----------
        host: str
            The remote server name/ip to connect to
        port: int
            Port to connect with
        username: str or None
            If authenticating, the user's identifier
        password: str of None
            User's password on the server, if using
        acct: str or None
            Some servers also need an "account" string for auth
        block_size: int or None
            If given, the read-ahead or write buffer size.
        tempdir: str
            Directory on remote to put temporary files when in a transaction
        timeout: int
            Timeout of the ftp connection in seconds
        encoding: str
            Encoding to use for directories and filenames in FTP connection
        tls: bool
            Use FTP-TLS, by default False
        """
        super().__init__(**kwargs)
        self.host = host
        self.port = port
        self.tempdir = tempdir or "/tmp"
        # Empty strings produce an anonymous login in ftplib
        self.cred = username or "", password or "", acct or ""
        self.timeout = timeout
        self.encoding = encoding
        if block_size is not None:
            self.blocksize = block_size
        else:
            self.blocksize = 2**16
        self.tls = tls
        self._connect()
        if self.tls:
            # Switch the data connection to encrypted mode after login
            self.ftp.prot_p()

    def _connect(self):
        """(Re)establish the FTP connection and log in with stored credentials."""
        if self.tls:
            ftp_cls = FTP_TLS
        else:
            ftp_cls = FTP
        # `encoding` keyword only exists on FTP() from python 3.9 onwards
        if sys.version_info >= (3, 9):
            self.ftp = ftp_cls(timeout=self.timeout, encoding=self.encoding)
        elif self.encoding:
            warnings.warn("`encoding` not supported for python<3.9, ignoring")
            self.ftp = ftp_cls(timeout=self.timeout)
        else:
            self.ftp = ftp_cls(timeout=self.timeout)
        self.ftp.connect(self.host, self.port)
        self.ftp.login(*self.cred)

    @classmethod
    def _strip_protocol(cls, path):
        # Always root-anchored, no trailing slash
        return "/" + infer_storage_options(path)["path"].lstrip("/").rstrip("/")

    @staticmethod
    def _get_kwargs_from_urls(urlpath):
        out = infer_storage_options(urlpath)
        out.pop("path", None)
        out.pop("protocol", None)
        return out

    def ls(self, path, detail=True, **kwargs):
        """List a remote directory, preferring MLSD with a `dir` fallback."""
        path = self._strip_protocol(path)
        out = []
        if path not in self.dircache:
            try:
                try:
                    out = [
                        (fn, details)
                        for (fn, details) in self.ftp.mlsd(path)
                        if fn not in [".", ".."]
                        and details["type"] not in ["pdir", "cdir"]
                    ]
                except error_perm:
                    # Server does not support MLSD; parse `dir` output instead
                    out = _mlsd2(self.ftp, path)  # Not platform independent
                for fn, details in out:
                    details["name"] = "/".join(
                        ["" if path == "/" else path, fn.lstrip("/")]
                    )
                    if details["type"] == "file":
                        details["size"] = int(details["size"])
                    else:
                        details["size"] = 0
                    if details["type"] == "dir":
                        details["type"] = "directory"
                self.dircache[path] = out
            except Error:
                # `path` may itself be a file rather than a directory
                try:
                    info = self.info(path)
                    if info["type"] == "file":
                        out = [(path, info)]
                except (Error, IndexError) as exc:
                    raise FileNotFoundError(path) from exc
        files = self.dircache.get(path, out)
        if not detail:
            return sorted([fn for fn, details in files])
        return [details for fn, details in files]

    def info(self, path, **kwargs):
        # implement with direct method
        path = self._strip_protocol(path)
        if path == "/":
            # special case, since this dir has no real entry
            return {"name": "/", "size": 0, "type": "directory"}
        # Find the entry within the parent's listing
        files = self.ls(self._parent(path).lstrip("/"), True)
        try:
            out = next(f for f in files if f["name"] == path)
        except StopIteration as exc:
            raise FileNotFoundError(path) from exc
        return out

    def get_file(self, rpath, lpath, **kwargs):
        """Download a remote file (or create a local dir for a remote dir)."""
        if self.isdir(rpath):
            if not os.path.exists(lpath):
                os.mkdir(lpath)
            return
        if isfilelike(lpath):
            outfile = lpath
        else:
            outfile = open(lpath, "wb")

        def cb(x):
            outfile.write(x)

        self.ftp.retrbinary(
            f"RETR {rpath}",
            blocksize=self.blocksize,
            callback=cb,
        )
        if not isfilelike(lpath):
            outfile.close()

    def cat_file(self, path, start=None, end=None, **kwargs):
        """Fetch file contents; ranged reads with an `end` go via the base class."""
        if end is not None:
            return super().cat_file(path, start, end, **kwargs)
        out = []

        def cb(x):
            out.append(x)

        try:
            self.ftp.retrbinary(
                f"RETR {path}",
                blocksize=self.blocksize,
                rest=start,  # server-side seek to `start`, if given
                callback=cb,
            )
        except (Error, error_perm) as orig_exc:
            raise FileNotFoundError(path) from orig_exc
        return b"".join(out)

    def _open(
        self,
        path,
        mode="rb",
        block_size=None,
        cache_options=None,
        autocommit=True,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        block_size = block_size or self.blocksize
        return FTPFile(
            self,
            path,
            mode=mode,
            block_size=block_size,
            tempdir=self.tempdir,
            autocommit=autocommit,
            cache_options=cache_options,
        )

    def _rm(self, path):
        path = self._strip_protocol(path)
        self.ftp.delete(path)
        self.invalidate_cache(self._parent(path))

    def rm(self, path, recursive=False, maxdepth=None):
        paths = self.expand_path(path, recursive=recursive, maxdepth=maxdepth)
        # reversed(): delete children before their parent directories
        for p in reversed(paths):
            if self.isfile(p):
                self.rm_file(p)
            else:
                self.rmdir(p)

    def mkdir(self, path: str, create_parents: bool = True, **kwargs: Any) -> None:
        path = self._strip_protocol(path)
        parent = self._parent(path)
        if parent != self.root_marker and not self.exists(parent) and create_parents:
            # Recursively create missing ancestors first
            self.mkdir(parent, create_parents=create_parents)

        self.ftp.mkd(path)
        self.invalidate_cache(self._parent(path))

    def makedirs(self, path: str, exist_ok: bool = False) -> None:
        path = self._strip_protocol(path)
        if self.exists(path):
            # NB: "/" does not "exist" as it has no directory entry
            if not exist_ok:
                raise FileExistsError(f"{path} exists without `exist_ok`")
            # exists_ok=True -> no-op
        else:
            self.mkdir(path, create_parents=True)

    def rmdir(self, path):
        path = self._strip_protocol(path)
        self.ftp.rmd(path)
        self.invalidate_cache(self._parent(path))

    def mv(self, path1, path2, **kwargs):
        path1 = self._strip_protocol(path1)
        path2 = self._strip_protocol(path2)
        self.ftp.rename(path1, path2)
        self.invalidate_cache(self._parent(path1))
        self.invalidate_cache(self._parent(path2))

    def __del__(self):
        # Best-effort close of the control connection on garbage collection.
        # NOTE(review): if _connect() raised, `self.ftp` may not exist and
        # this will itself raise inside __del__ — TODO confirm/handle.
        self.ftp.close()

    def invalidate_cache(self, path=None):
        if path is None:
            self.dircache.clear()
        else:
            self.dircache.pop(path, None)
        super().invalidate_cache(path)
269
+
270
+
271
class TransferDone(Exception):
    """Internal exception used to break out of an FTP transfer early."""
275
+
276
+
277
class FTPFile(AbstractBufferedFile):
    """Interact with a remote FTP file with read/write buffering"""

    def __init__(
        self,
        fs,
        path,
        mode="rb",
        block_size="default",
        autocommit=True,
        cache_type="readahead",
        cache_options=None,
        **kwargs,
    ):
        super().__init__(
            fs,
            path,
            mode=mode,
            block_size=block_size,
            autocommit=autocommit,
            cache_type=cache_type,
            cache_options=cache_options,
            **kwargs,
        )
        if not autocommit:
            # Transactional write: write to a temp path, move on commit
            self.target = self.path
            self.path = "/".join([kwargs["tempdir"], str(uuid.uuid4())])

    def commit(self):
        """Move the temporary file into its final location (transaction end)."""
        self.fs.mv(self.path, self.target)

    def discard(self):
        """Drop the temporary file (transaction abort)."""
        self.fs.rm(self.path)

    def _fetch_range(self, start, end):
        """Get bytes between given byte limits

        Implemented by raising an exception in the fetch callback when the
        number of bytes received reaches the requested amount.

        Will fail if the server does not respect the REST command on
        retrieve requests.
        """
        out = []
        # list-wrapped counter so the nested callback can mutate it
        total = [0]

        def callback(x):
            total[0] += len(x)
            if total[0] > end - start:
                # Trim the final chunk to the requested range; the slice end
                # is negative (overshoot amount), cutting off the excess.
                out.append(x[: (end - start) - total[0]])
                if end < self.size:
                    raise TransferDone
            else:
                out.append(x)

            if total[0] == end - start and end < self.size:
                raise TransferDone

        try:
            self.fs.ftp.retrbinary(
                f"RETR {self.path}",
                blocksize=self.blocksize,
                rest=start,
                callback=callback,
            )
        except TransferDone:
            try:
                # stop transfer, we got enough bytes for this block
                self.fs.ftp.abort()
                self.fs.ftp.getmultiline()
            except Error:
                # connection is in an undefined state after a failed abort;
                # start a fresh one
                self.fs._connect()

        return b"".join(out)

    def _upload_chunk(self, final=False):
        # Append the buffered bytes at the current offset via REST + STOR
        self.buffer.seek(0)
        self.fs.ftp.storbinary(
            f"STOR {self.path}", self.buffer, blocksize=self.blocksize, rest=self.offset
        )
        return True
358
+
359
+
360
+ def _mlsd2(ftp, path="."):
361
+ """
362
+ Fall back to using `dir` instead of `mlsd` if not supported.
363
+
364
+ This parses a Linux style `ls -l` response to `dir`, but the response may
365
+ be platform dependent.
366
+
367
+ Parameters
368
+ ----------
369
+ ftp: ftplib.FTP
370
+ path: str
371
+ Expects to be given path, but defaults to ".".
372
+ """
373
+ lines = []
374
+ minfo = []
375
+ ftp.dir(path, lines.append)
376
+ for line in lines:
377
+ split_line = line.split()
378
+ if len(split_line) < 9:
379
+ continue
380
+ this = (
381
+ split_line[-1],
382
+ {
383
+ "modify": " ".join(split_line[5:8]),
384
+ "unix.owner": split_line[2],
385
+ "unix.group": split_line[3],
386
+ "unix.mode": split_line[0],
387
+ "size": split_line[4],
388
+ },
389
+ )
390
+ if this[1]["unix.mode"][0] == "d":
391
+ this[1]["type"] = "dir"
392
+ else:
393
+ this[1]["type"] = "file"
394
+ minfo.append(this)
395
+ return minfo
mgm/lib/python3.10/site-packages/fsspec/implementations/git.py ADDED
@@ -0,0 +1,115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+
3
+ import pygit2
4
+
5
+ from fsspec.spec import AbstractFileSystem
6
+
7
+ from .memory import MemoryFile
8
+
9
+
10
class GitFileSystem(AbstractFileSystem):
    """Browse the files of a local git repo at any hash/tag/branch

    (experimental backend)
    """

    root_marker = ""
    cachable = True

    def __init__(self, path=None, fo=None, ref=None, **kwargs):
        """

        Parameters
        ----------
        path: str (optional)
            Local location of the repo (uses current directory if not given).
            May be deprecated in favour of ``fo``. When used with a higher
            level function such as fsspec.open(), may be of the form
            "git://[path-to-repo[:]][ref@]path/to/file" (but the actual
            file path should not contain "@" or ":").
        fo: str (optional)
            Same as ``path``, but passed as part of a chained URL. This one
            takes precedence if both are given.
        ref: str (optional)
            Reference to work with, could be a hash, tag or branch name. Defaults
            to current working tree. Note that ``ls`` and ``open`` also take hash,
            so this becomes the default for those operations
        kwargs
        """
        super().__init__(**kwargs)
        self.repo = pygit2.Repository(fo or path or os.getcwd())
        self.ref = ref or "master"

    @classmethod
    def _strip_protocol(cls, path):
        # Strip optional "repo:" and "ref@" prefixes from a chained URL,
        # leaving only the in-repo file path.
        path = super()._strip_protocol(path).lstrip("/")
        if ":" in path:
            path = path.split(":", 1)[1]
        if "@" in path:
            path = path.split("@", 1)[1]
        return path.lstrip("/")

    def _path_to_object(self, path, ref):
        """Resolve ``path`` at ``ref`` to a pygit2 Tree or Blob.

        Raises FileNotFoundError if any path component is missing.
        """
        comm, ref = self.repo.resolve_refish(ref or self.ref)
        parts = path.split("/")
        tree = comm.tree
        for part in parts:
            if part and isinstance(tree, pygit2.Tree):
                if part not in tree:
                    raise FileNotFoundError(path)
                tree = tree[part]
        return tree

    @staticmethod
    def _get_kwargs_from_urls(path):
        # Inverse of the URL form documented in __init__: extract repo path
        # ("repo:") and ref ("ref@") components, if present.
        if path.startswith("git://"):
            path = path[6:]
        out = {}
        if ":" in path:
            out["path"], path = path.split(":", 1)
        if "@" in path:
            out["ref"], path = path.split("@", 1)
        return out

    @staticmethod
    def _object_to_info(obj, path=None):
        """Build an fsspec info dict from a pygit2 Tree/Blob."""
        # obj.name and obj.filemode are None for the root tree!
        is_dir = isinstance(obj, pygit2.Tree)
        return {
            "type": "directory" if is_dir else "file",
            "name": (
                "/".join([path, obj.name or ""]).lstrip("/") if path else obj.name
            ),
            "hex": str(obj.id),
            "mode": "100644" if obj.filemode is None else f"{obj.filemode:o}",
            "size": 0 if is_dir else obj.size,
        }

    def ls(self, path, detail=True, ref=None, **kwargs):
        """List entries under ``path`` at the given (or default) ref."""
        tree = self._path_to_object(self._strip_protocol(path), ref)
        return [
            GitFileSystem._object_to_info(obj, path)
            if detail
            else GitFileSystem._object_to_info(obj, path)["name"]
            # A Blob path lists as a single entry, like `ls` on a file
            for obj in (tree if isinstance(tree, pygit2.Tree) else [tree])
        ]

    def info(self, path, ref=None, **kwargs):
        tree = self._path_to_object(self._strip_protocol(path), ref)
        return GitFileSystem._object_to_info(tree, path)

    def ukey(self, path, ref=None):
        # Git object hash is a natural content-change token
        return self.info(path, ref=ref)["hex"]

    def _open(
        self,
        path,
        mode="rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        ref=None,
        **kwargs,
    ):
        # Whole blob is materialized in memory; read-only access
        obj = self._path_to_object(path, ref or self.ref)
        return MemoryFile(data=obj.data)
mgm/lib/python3.10/site-packages/fsspec/implementations/github.py ADDED
@@ -0,0 +1,239 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import requests
2
+
3
+ import fsspec
4
+
5
+ from ..spec import AbstractFileSystem
6
+ from ..utils import infer_storage_options
7
+ from .memory import MemoryFile
8
+
9
+ # TODO: add GIST backend, would be very similar
10
+
11
+
12
class GithubFileSystem(AbstractFileSystem):
    """Interface to files in github

    An instance of this class provides the files residing within a remote github
    repository. You may specify a point in the repos history, by SHA, branch
    or tag (default is current master).

    Given that code files tend to be small, and that github does not support
    retrieving partial content, we always fetch whole files.

    When using fsspec.open, allows URIs of the form:

    - "github://path/file", in which case you must specify org, repo and
      may specify sha in the extra args
    - 'github://org:repo@/precip/catalog.yml', where the org and repo are
      part of the URI
    - 'github://org:repo@sha/precip/catalog.yml', where the sha is also included

    ``sha`` can be the full or abbreviated hex of the commit you want to fetch
    from, or a branch or tag name (so long as it doesn't contain special characters
    like "/", "?", which would have to be HTTP-encoded).

    For authorised access, you must provide username and token, which can be made
    at https://github.com/settings/tokens
    """

    url = "https://api.github.com/repos/{org}/{repo}/git/trees/{sha}"
    rurl = "https://raw.githubusercontent.com/{org}/{repo}/{sha}/{path}"
    protocol = "github"
    timeout = (60, 60)  # connect, read timeouts

    def __init__(
        self, org, repo, sha=None, username=None, token=None, timeout=None, **kwargs
    ):
        super().__init__(**kwargs)
        self.org = org
        self.repo = repo
        # Username and token must be given together (or neither, for anonymous)
        if (username is None) ^ (token is None):
            raise ValueError("Auth required both username and token")
        self.username = username
        self.token = token
        if timeout is not None:
            self.timeout = timeout
        if sha is None:
            # look up default branch (not necessarily "master")
            u = "https://api.github.com/repos/{org}/{repo}"
            r = requests.get(
                u.format(org=org, repo=repo), timeout=self.timeout, **self.kw
            )
            r.raise_for_status()
            sha = r.json()["default_branch"]

        self.root = sha
        # Eagerly populate the root listing (also validates org/repo/sha)
        self.ls("")

    @property
    def kw(self):
        """Extra kwargs for ``requests`` calls: basic auth, when configured."""
        if self.username:
            return {"auth": (self.username, self.token)}
        return {}

    @classmethod
    def repos(cls, org_or_user, is_org=True):
        """List repo names for given org or user

        This may become the top level of the FS

        Parameters
        ----------
        org_or_user: str
            Name of the github org or user to query
        is_org: bool (default True)
            Whether the name is an organisation (True) or user (False)

        Returns
        -------
        List of string
        """
        r = requests.get(
            f"https://api.github.com/{['users', 'orgs'][is_org]}/{org_or_user}/repos",
            timeout=cls.timeout,
        )
        r.raise_for_status()
        return [repo["name"] for repo in r.json()]

    @property
    def tags(self):
        """Names of tags in the repo"""
        r = requests.get(
            f"https://api.github.com/repos/{self.org}/{self.repo}/tags",
            timeout=self.timeout,
            **self.kw,
        )
        r.raise_for_status()
        return [t["name"] for t in r.json()]

    @property
    def branches(self):
        """Names of branches in the repo"""
        r = requests.get(
            f"https://api.github.com/repos/{self.org}/{self.repo}/branches",
            timeout=self.timeout,
            **self.kw,
        )
        r.raise_for_status()
        return [t["name"] for t in r.json()]

    @property
    def refs(self):
        """Named references, tags and branches"""
        return {"tags": self.tags, "branches": self.branches}

    def ls(self, path, detail=False, sha=None, _sha=None, **kwargs):
        """List files at given path

        Parameters
        ----------
        path: str
            Location to list, relative to repo root
        detail: bool
            If True, returns list of dicts, one per file; if False, returns
            list of full filenames only
        sha: str (optional)
            List at the given point in the repo history, branch or tag name or commit
            SHA
        _sha: str (optional)
            List this specific tree object (used internally to descend into trees)
        """
        path = self._strip_protocol(path)
        if path == "":
            _sha = sha or self.root
        if _sha is None:
            # Walk down from the root one component at a time to find the
            # tree SHA for `path` (the trees API addresses trees by SHA)
            parts = path.rstrip("/").split("/")
            so_far = ""
            _sha = sha or self.root
            for part in parts:
                out = self.ls(so_far, True, sha=sha, _sha=_sha)
                so_far += "/" + part if so_far else part
                out = [o for o in out if o["name"] == so_far]
                if not out:
                    raise FileNotFoundError(path)
                out = out[0]
                if out["type"] == "file":
                    # Listing a file path returns just that file
                    if detail:
                        return [out]
                    else:
                        return path
                _sha = out["sha"]
        # Only cache listings for the instance's default root sha
        if path not in self.dircache or sha not in [self.root, None]:
            r = requests.get(
                self.url.format(org=self.org, repo=self.repo, sha=_sha),
                timeout=self.timeout,
                **self.kw,
            )
            if r.status_code == 404:
                raise FileNotFoundError(path)
            r.raise_for_status()
            types = {"blob": "file", "tree": "directory"}
            out = [
                {
                    "name": path + "/" + f["path"] if path else f["path"],
                    "mode": f["mode"],
                    "type": types[f["type"]],
                    "size": f.get("size", 0),
                    "sha": f["sha"],
                }
                for f in r.json()["tree"]
                if f["type"] in types  # skip submodules/symlink entries
            ]
            if sha in [self.root, None]:
                self.dircache[path] = out
        else:
            out = self.dircache[path]
        if detail:
            return out
        else:
            return sorted([f["name"] for f in out])

    def invalidate_cache(self, path=None):
        # Listings are cheap to refetch; drop everything regardless of `path`
        self.dircache.clear()

    @classmethod
    def _strip_protocol(cls, path):
        opts = infer_storage_options(path)
        if "username" not in opts:
            return super()._strip_protocol(path)
        return opts["path"].lstrip("/")

    @staticmethod
    def _get_kwargs_from_urls(path):
        # "github://org:repo@sha/path" -> org/repo/sha kwargs
        opts = infer_storage_options(path)
        if "username" not in opts:
            return {}
        out = {"org": opts["username"], "repo": opts["password"]}
        if opts["host"]:
            out["sha"] = opts["host"]
        return out

    def _open(
        self,
        path,
        mode="rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        sha=None,
        **kwargs,
    ):
        """Fetch the whole file from raw.githubusercontent.com (read-only)."""
        if mode != "rb":
            raise NotImplementedError
        url = self.rurl.format(
            org=self.org, repo=self.repo, path=path, sha=sha or self.root
        )
        r = requests.get(url, timeout=self.timeout, **self.kw)
        if r.status_code == 404:
            raise FileNotFoundError(path)
        r.raise_for_status()
        return MemoryFile(None, None, r.content)

    def cat(self, path, recursive=False, on_error="raise", **kwargs):
        """Fetch the contents of one or more paths.

        Returns a dict mapping each expanded repo path to its raw bytes
        (errors are returned in place of content, per ``on_error="return"``
        of the underlying HTTP fetch).
        """
        paths = self.expand_path(path, recursive=recursive)
        # BUG FIX: expand_path returns plain path strings; the previous
        # ``for u, sh in paths`` tried to unpack each *string* into two
        # characters, raising ValueError for any path not of length 2.
        urls = [
            self.rurl.format(org=self.org, repo=self.repo, path=u, sha=self.root)
            for u in paths
        ]
        fs = fsspec.filesystem("http")
        data = fs.cat(urls, on_error="return")
        # Key the results by repo path, not by raw URL, to match the
        # dict-of-paths contract of ``cat`` on other filesystems.
        return {u: v for ((k, v), u) in zip(data.items(), paths)}
mgm/lib/python3.10/site-packages/fsspec/implementations/http.py ADDED
@@ -0,0 +1,856 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import asyncio
2
+ import io
3
+ import logging
4
+ import re
5
+ import weakref
6
+ from copy import copy
7
+ from urllib.parse import urlparse
8
+
9
+ import aiohttp
10
+ import yarl
11
+
12
+ from fsspec.asyn import AbstractAsyncStreamedFile, AsyncFileSystem, sync, sync_wrapper
13
+ from fsspec.callbacks import DEFAULT_CALLBACK
14
+ from fsspec.exceptions import FSTimeoutError
15
+ from fsspec.spec import AbstractBufferedFile
16
+ from fsspec.utils import (
17
+ DEFAULT_BLOCK_SIZE,
18
+ glob_translate,
19
+ isfilelike,
20
+ nullcontext,
21
+ tokenize,
22
+ )
23
+
24
+ from ..caching import AllBytes
25
+
26
+ # https://stackoverflow.com/a/15926317/3821154
27
+ ex = re.compile(r"""<(a|A)\s+(?:[^>]*?\s+)?(href|HREF)=["'](?P<url>[^"']+)""")
28
+ ex2 = re.compile(r"""(?P<url>http[s]?://[-a-zA-Z0-9@:%_+.~#?&/=]+)""")
29
+ logger = logging.getLogger("fsspec.http")
30
+
31
+
32
async def get_client(**kwargs):
    """Default session factory: construct an ``aiohttp.ClientSession``.

    Passed as the ``get_client`` argument of ``HTTPFileSystem`` so users can
    substitute their own factory; all keyword arguments are forwarded to the
    ``ClientSession`` constructor.
    """
    return aiohttp.ClientSession(**kwargs)
34
+
35
+
36
class HTTPFileSystem(AsyncFileSystem):
    """
    Simple File-System for fetching data via HTTP(S)

    ``ls()`` is implemented by loading the parent page and doing a regex
    match on the result. If simple_link=True, anything of the form
    "http(s)://server.com/stuff?thing=other"; otherwise only links within
    HTML href tags will be used.
    """

    sep = "/"

    def __init__(
        self,
        simple_links=True,
        block_size=None,
        same_scheme=True,
        size_policy=None,
        cache_type="bytes",
        cache_options=None,
        asynchronous=False,
        loop=None,
        client_kwargs=None,
        get_client=get_client,
        encoded=False,
        **storage_options,
    ):
        """
        NB: if this is called async, you must await set_client

        Parameters
        ----------
        block_size: int
            Blocks to read bytes; if 0, will default to raw requests file-like
            objects instead of HTTPFile instances
        simple_links: bool
            If True, will consider both HTML <a> tags and anything that looks
            like a URL; if False, will consider only the former.
        same_scheme: True
            When doing ls/glob, if this is True, only consider paths that have
            http/https matching the input URLs.
        size_policy: this argument is deprecated
        client_kwargs: dict
            Passed to aiohttp.ClientSession, see
            https://docs.aiohttp.org/en/stable/client_reference.html
            For example, ``{'auth': aiohttp.BasicAuth('user', 'pass')}``
        get_client: Callable[..., aiohttp.ClientSession]
            A callable which takes keyword arguments and constructs
            an aiohttp.ClientSession. It's state will be managed by
            the HTTPFileSystem class.
        storage_options: key-value
            Any other parameters passed on to requests
        cache_type, cache_options: defaults used in open
        """
        # NOTE(review): the base class receives ``self`` as an explicit first
        # positional argument here; AsyncFileSystem absorbs extra positionals,
        # but confirm before "simplifying" this call.
        super().__init__(self, asynchronous=asynchronous, loop=loop, **storage_options)
        self.block_size = block_size if block_size is not None else DEFAULT_BLOCK_SIZE
        self.simple_links = simple_links
        self.same_schema = same_scheme
        self.cache_type = cache_type
        self.cache_options = cache_options
        self.client_kwargs = client_kwargs or {}
        self.get_client = get_client
        self.encoded = encoded
        self.kwargs = storage_options
        self._session = None

        # Clean caching-related parameters from `storage_options`
        # before propagating them as `request_options` through `self.kwargs`.
        # TODO: Maybe rename `self.kwargs` to `self.request_options` to make
        # it clearer.
        request_options = copy(storage_options)
        self.use_listings_cache = request_options.pop("use_listings_cache", False)
        request_options.pop("listings_expiry_time", None)
        request_options.pop("max_paths", None)
        request_options.pop("skip_instance_cache", None)
        self.kwargs = request_options

    @property
    def fsid(self):
        # Stable filesystem identifier used in cache keys.
        return "http"

    def encode_url(self, url):
        # yarl re-encodes unless the URL is flagged as already-encoded.
        return yarl.URL(url, encoded=self.encoded)

    @staticmethod
    def close_session(loop, session):
        """Best-effort close of the aiohttp session at finalization time."""
        if loop is not None and loop.is_running():
            try:
                sync(loop, session.close, timeout=0.1)
                return
            except (TimeoutError, FSTimeoutError, NotImplementedError):
                pass
        connector = getattr(session, "_connector", None)
        if connector is not None:
            # close after loop is dead
            connector._close()

    async def set_session(self):
        """Create (once) and return the shared aiohttp session."""
        if self._session is None:
            self._session = await self.get_client(loop=self.loop, **self.client_kwargs)
            if not self.asynchronous:
                # ensure the session is closed when this filesystem is GC'd
                weakref.finalize(self, self.close_session, self.loop, self._session)
        return self._session

    @classmethod
    def _strip_protocol(cls, path):
        """For HTTP, we always want to keep the full URL"""
        return path

    @classmethod
    def _parent(cls, path):
        # override, since _strip_protocol is different for URLs
        par = super()._parent(path)
        if len(par) > 7:  # "http://..."
            return par
        return ""

    async def _ls_real(self, url, detail=True, **kwargs):
        """List links found on the page at ``url`` (directory emulation)."""
        # ignoring URL-encoded arguments
        kw = self.kwargs.copy()
        kw.update(kwargs)
        logger.debug(url)
        session = await self.set_session()
        # FIX: pass the merged ``kw`` (was ``self.kwargs``, which silently
        # dropped any per-call keyword arguments).
        async with session.get(self.encode_url(url), **kw) as r:
            self._raise_not_found_for_status(r, url)
            try:
                text = await r.text()
                if self.simple_links:
                    links = ex2.findall(text) + [u[2] for u in ex.findall(text)]
                else:
                    links = [u[2] for u in ex.findall(text)]
            except UnicodeDecodeError:
                links = []  # binary, not HTML
        out = set()
        parts = urlparse(url)
        for l in links:
            if isinstance(l, tuple):
                l = l[1]
            if l.startswith("/") and len(l) > 1:
                # absolute URL on this server
                l = f"{parts.scheme}://{parts.netloc}{l}"
            if l.startswith("http"):
                if self.same_schema and l.startswith(url.rstrip("/") + "/"):
                    out.add(l)
                elif l.replace("https", "http").startswith(
                    url.replace("https", "http").rstrip("/") + "/"
                ):
                    # allowed to cross http <-> https
                    out.add(l)
            else:
                if l not in ["..", "../"]:
                    # Ignore FTP-like "parent"
                    out.add("/".join([url.rstrip("/"), l.lstrip("/")]))
        if not out and url.endswith("/"):
            out = await self._ls_real(url.rstrip("/"), detail=False)
        if detail:
            return [
                {
                    "name": u,
                    "size": None,
                    "type": "directory" if u.endswith("/") else "file",
                }
                for u in out
            ]
        else:
            return sorted(out)

    async def _ls(self, url, detail=True, **kwargs):
        # Serve from the listings cache when enabled; always refresh it.
        if self.use_listings_cache and url in self.dircache:
            out = self.dircache[url]
        else:
            out = await self._ls_real(url, detail=detail, **kwargs)
            self.dircache[url] = out
        return out

    ls = sync_wrapper(_ls)

    def _raise_not_found_for_status(self, response, url):
        """
        Raises FileNotFoundError for 404s, otherwise uses raise_for_status.
        """
        if response.status == 404:
            raise FileNotFoundError(url)
        response.raise_for_status()

    async def _cat_file(self, url, start=None, end=None, **kwargs):
        """Fetch (a byte range of) a URL's content."""
        kw = self.kwargs.copy()
        kw.update(kwargs)
        logger.debug(url)

        if start is not None or end is not None:
            if start == end:
                return b""
            headers = kw.pop("headers", {}).copy()

            headers["Range"] = await self._process_limits(url, start, end)
            kw["headers"] = headers
        session = await self.set_session()
        async with session.get(self.encode_url(url), **kw) as r:
            # read first so the body is available even on error paths
            out = await r.read()
            self._raise_not_found_for_status(r, url)
        return out

    async def _get_file(
        self, rpath, lpath, chunk_size=5 * 2**20, callback=DEFAULT_CALLBACK, **kwargs
    ):
        """Stream a remote URL to a local file (or file-like object)."""
        kw = self.kwargs.copy()
        kw.update(kwargs)
        logger.debug(rpath)
        session = await self.set_session()
        async with session.get(self.encode_url(rpath), **kw) as r:
            try:
                size = int(r.headers["content-length"])
            except (ValueError, KeyError):
                size = None

            callback.set_size(size)
            self._raise_not_found_for_status(r, rpath)
            if isfilelike(lpath):
                outfile = lpath
            else:
                outfile = open(lpath, "wb")  # noqa: ASYNC101, ASYNC230

            try:
                chunk = True
                while chunk:
                    chunk = await r.content.read(chunk_size)
                    outfile.write(chunk)
                    callback.relative_update(len(chunk))
            finally:
                # only close files we opened ourselves
                if not isfilelike(lpath):
                    outfile.close()

    async def _put_file(
        self,
        lpath,
        rpath,
        chunk_size=5 * 2**20,
        callback=DEFAULT_CALLBACK,
        method="post",
        mode="overwrite",
        **kwargs,
    ):
        """Upload a local file (or file-like) via POST or PUT, streamed."""
        if mode != "overwrite":
            raise NotImplementedError("Exclusive write")

        async def gen_chunks():
            # Support passing arbitrary file-like objects
            # and use them instead of streams.
            if isinstance(lpath, io.IOBase):
                context = nullcontext(lpath)
                use_seek = False  # might not support seeking
            else:
                context = open(lpath, "rb")  # noqa: ASYNC101, ASYNC230
                use_seek = True

            with context as f:
                if use_seek:
                    callback.set_size(f.seek(0, 2))
                    f.seek(0)
                else:
                    callback.set_size(getattr(f, "size", None))

                chunk = f.read(chunk_size)
                while chunk:
                    yield chunk
                    callback.relative_update(len(chunk))
                    chunk = f.read(chunk_size)

        kw = self.kwargs.copy()
        kw.update(kwargs)
        session = await self.set_session()

        method = method.lower()
        if method not in ("post", "put"):
            raise ValueError(
                f"method has to be either 'post' or 'put', not: {method!r}"
            )

        meth = getattr(session, method)
        async with meth(self.encode_url(rpath), data=gen_chunks(), **kw) as resp:
            self._raise_not_found_for_status(resp, rpath)

    async def _exists(self, path, **kwargs):
        """True if a GET of the URL returns a non-error status."""
        kw = self.kwargs.copy()
        kw.update(kwargs)
        try:
            logger.debug(path)
            session = await self.set_session()
            r = await session.get(self.encode_url(path), **kw)
            async with r:
                return r.status < 400
        except aiohttp.ClientError:
            return False

    async def _isfile(self, path, **kwargs):
        # every reachable URL is considered a file
        return await self._exists(path, **kwargs)

    def _open(
        self,
        path,
        mode="rb",
        block_size=None,
        autocommit=None,  # XXX: This differs from the base class.
        cache_type=None,
        cache_options=None,
        size=None,
        **kwargs,
    ):
        """Make a file-like object

        Parameters
        ----------
        path: str
            Full URL with protocol
        mode: string
            must be "rb"
        block_size: int or None
            Bytes to download in one request; use instance value if None. If
            zero, will return a streaming Requests file-like instance.
        kwargs: key-value
            Any other parameters, passed to requests calls
        """
        if mode != "rb":
            raise NotImplementedError
        block_size = block_size if block_size is not None else self.block_size
        kw = self.kwargs.copy()
        kw["asynchronous"] = self.asynchronous
        kw.update(kwargs)
        info = {}
        # Fetch size (and range-support hints) only when not supplied.
        size = size or info.update(self.info(path, **kwargs)) or info["size"]
        session = sync(self.loop, self.set_session)
        if block_size and size and info.get("partial", True):
            return HTTPFile(
                self,
                path,
                session=session,
                block_size=block_size,
                mode=mode,
                size=size,
                cache_type=cache_type or self.cache_type,
                cache_options=cache_options or self.cache_options,
                loop=self.loop,
                **kw,
            )
        else:
            return HTTPStreamFile(
                self,
                path,
                mode=mode,
                loop=self.loop,
                session=session,
                **kw,
            )

    async def open_async(self, path, mode="rb", size=None, **kwargs):
        """Async counterpart of ``_open``; always returns a streaming file."""
        session = await self.set_session()
        if size is None:
            try:
                size = (await self._info(path, **kwargs))["size"]
            except FileNotFoundError:
                pass
        return AsyncStreamFile(
            self,
            path,
            loop=self.loop,
            session=session,
            size=size,
            **kwargs,
        )

    def ukey(self, url):
        """Unique identifier; assume HTTP files are static, unchanging"""
        return tokenize(url, self.kwargs, self.protocol)

    async def _info(self, url, **kwargs):
        """Get info of URL

        Tries to access location via HEAD, and then GET methods, but does
        not fetch the data.

        It is possible that the server does not supply any size information, in
        which case size will be given as None (and certain operations on the
        corresponding file will not work).
        """
        info = {}
        session = await self.set_session()

        for policy in ["head", "get"]:
            try:
                info.update(
                    await _file_info(
                        self.encode_url(url),
                        size_policy=policy,
                        session=session,
                        **self.kwargs,
                        **kwargs,
                    )
                )
                if info.get("size") is not None:
                    break
            except Exception as exc:
                if policy == "get":
                    # If get failed, then raise a FileNotFoundError
                    raise FileNotFoundError(url) from exc
                logger.debug("", exc_info=exc)

        return {"name": url, "size": None, **info, "type": "file"}

    async def _glob(self, path, maxdepth=None, **kwargs):
        """
        Find files by glob-matching.

        This implementation is identical to the one in AbstractFileSystem,
        but "?" is not considered as a character for globbing, because it is
        so common in URLs, often identifying the "query" part.
        """
        if maxdepth is not None and maxdepth < 1:
            raise ValueError("maxdepth must be at least 1")
        import re

        ends_with_slash = path.endswith("/")  # _strip_protocol strips trailing slash
        path = self._strip_protocol(path)
        append_slash_to_dirname = ends_with_slash or path.endswith(("/**", "/*"))
        idx_star = path.find("*") if path.find("*") >= 0 else len(path)
        idx_brace = path.find("[") if path.find("[") >= 0 else len(path)

        min_idx = min(idx_star, idx_brace)

        detail = kwargs.pop("detail", False)

        if not has_magic(path):
            if await self._exists(path, **kwargs):
                if not detail:
                    return [path]
                else:
                    return {path: await self._info(path, **kwargs)}
            else:
                if not detail:
                    return []  # glob of non-existent returns empty
                else:
                    return {}
        elif "/" in path[:min_idx]:
            min_idx = path[:min_idx].rindex("/")
            root = path[: min_idx + 1]
            depth = path[min_idx + 1 :].count("/") + 1
        else:
            root = ""
            depth = path[min_idx + 1 :].count("/") + 1

        if "**" in path:
            if maxdepth is not None:
                idx_double_stars = path.find("**")
                depth_double_stars = path[idx_double_stars:].count("/") + 1
                depth = depth - depth_double_stars + maxdepth
            else:
                depth = None

        allpaths = await self._find(
            root, maxdepth=depth, withdirs=True, detail=True, **kwargs
        )

        pattern = glob_translate(path + ("/" if ends_with_slash else ""))
        pattern = re.compile(pattern)

        out = {
            (
                p.rstrip("/")
                if not append_slash_to_dirname
                and info["type"] == "directory"
                and p.endswith("/")
                else p
            ): info
            for p, info in sorted(allpaths.items())
            if pattern.match(p.rstrip("/"))
        }

        if detail:
            return out
        else:
            return list(out)

    async def _isdir(self, path):
        # override, since all URLs are (also) files
        try:
            return bool(await self._ls(path))
        except (FileNotFoundError, ValueError):
            return False
524
+
525
+
526
class HTTPFile(AbstractBufferedFile):
    """
    A file-like object pointing to a remote HTTP(S) resource

    Supports only reading, with read-ahead of a predetermined block-size.

    In the case that the server does not supply the filesize, only reading of
    the complete file in one go is supported.

    Parameters
    ----------
    url: str
        Full URL of the remote resource, including the protocol
    session: aiohttp.ClientSession or None
        All calls will be made within this session, to avoid restarting
        connections where the server allows this
    block_size: int or None
        The amount of read-ahead to do, in bytes. Default is 5MB, or the value
        configured for the FileSystem creating this file
    size: None or int
        If given, this is the size of the file in bytes, and we don't attempt
        to call the server to find the value.
    kwargs: all other key-values are passed to requests calls.
    """

    def __init__(
        self,
        fs,
        url,
        session=None,
        block_size=None,
        mode="rb",
        cache_type="bytes",
        cache_options=None,
        size=None,
        loop=None,
        asynchronous=False,
        **kwargs,
    ):
        if mode != "rb":
            raise NotImplementedError("File mode not supported")
        self.asynchronous = asynchronous
        self.loop = loop
        self.url = url
        self.session = session
        # details must be set before super().__init__, which reads size from it
        self.details = {"name": url, "size": size, "type": "file"}
        super().__init__(
            fs=fs,
            path=url,
            mode=mode,
            block_size=block_size,
            cache_type=cache_type,
            cache_options=cache_options,
            **kwargs,
        )

    def read(self, length=-1):
        """Read bytes from file

        Parameters
        ----------
        length: int
            Read up to this many bytes. If negative, read all content to end of
            file. If the server has not supplied the filesize, attempting to
            read only part of the data will raise a ValueError.
        """
        if (
            (length < 0 and self.loc == 0)  # explicit read all
            # but not when the size is known and fits into a block anyways
            and not (self.size is not None and self.size <= self.blocksize)
        ):
            self._fetch_all()
        if self.size is None:
            # unknown size: only whole-file reads are possible
            if length < 0:
                self._fetch_all()
        else:
            # clamp so super().read never asks past EOF
            length = min(self.size - self.loc, length)
        return super().read(length)

    async def async_fetch_all(self):
        """Read whole file in one shot, without caching

        This is only called when position is still at zero,
        and read() is called without a byte-count.
        """
        logger.debug(f"Fetch all for {self}")
        if not isinstance(self.cache, AllBytes):
            r = await self.session.get(self.fs.encode_url(self.url), **self.kwargs)
            async with r:
                r.raise_for_status()
                out = await r.read()
                # replace the block cache with a whole-content cache
                self.cache = AllBytes(
                    size=len(out), fetcher=None, blocksize=None, data=out
                )
                self.size = len(out)

    _fetch_all = sync_wrapper(async_fetch_all)

    def _parse_content_range(self, headers):
        """Parse the Content-Range header

        Returns (start, end, total), each possibly ``None`` when the header
        is absent, malformed, or uses the ``*`` wildcard.
        """
        s = headers.get("Content-Range", "")
        m = re.match(r"bytes (\d+-\d+|\*)/(\d+|\*)", s)
        if not m:
            return None, None, None

        if m[1] == "*":
            start = end = None
        else:
            start, end = [int(x) for x in m[1].split("-")]
        total = None if m[2] == "*" else int(m[2])
        return start, end, total

    async def async_fetch_range(self, start, end):
        """Download a block of data

        The expectation is that the server returns only the requested bytes,
        with HTTP code 206. If this is not the case, we first check the headers,
        and then stream the output - if the data size is bigger than we
        requested, an exception is raised.
        """
        logger.debug(f"Fetch range for {self}: {start}-{end}")
        kwargs = self.kwargs.copy()
        headers = kwargs.pop("headers", {}).copy()
        # HTTP Range is inclusive at both ends, hence end - 1
        headers["Range"] = f"bytes={start}-{end - 1}"
        logger.debug(f"{self.url} : {headers['Range']}")
        r = await self.session.get(
            self.fs.encode_url(self.url), headers=headers, **kwargs
        )
        async with r:
            if r.status == 416:
                # range request outside file
                return b""
            r.raise_for_status()

            # If the server has handled the range request, it should reply
            # with status 206 (partial content). But we'll guess that a suitable
            # Content-Range header or a Content-Length no more than the
            # requested range also mean we have got the desired range.
            response_is_range = (
                r.status == 206
                or self._parse_content_range(r.headers)[0] == start
                or int(r.headers.get("Content-Length", end + 1)) <= end - start
            )

            if response_is_range:
                # partial content, as expected
                out = await r.read()
            elif start > 0:
                raise ValueError(
                    "The HTTP server doesn't appear to support range requests. "
                    "Only reading this file from the beginning is supported. "
                    "Open with block_size=0 for a streaming file interface."
                )
            else:
                # Response is not a range, but we want the start of the file,
                # so we can read the required amount anyway.
                cl = 0
                out = []
                while True:
                    chunk = await r.content.read(2**20)
                    # data size unknown, let's read until we have enough
                    if chunk:
                        out.append(chunk)
                        cl += len(chunk)
                        if cl > end - start:
                            break
                    else:
                        break
                # trim to exactly the requested window
                out = b"".join(out)[: end - start]
            return out

    _fetch_range = sync_wrapper(async_fetch_range)
698
+
699
+
700
# Note: unlike standard glob, "?" is deliberately NOT magic here (common in URLs).
magic_check = re.compile("([*[])")


def has_magic(s):
    """Return True when *s* contains glob magic characters ('*' or '[')."""
    return magic_check.search(s) is not None
706
+
707
+
708
class HTTPStreamFile(AbstractBufferedFile):
    """Streaming (non-seekable) read-only file over HTTP.

    Used when the server's size/range support is unknown or disabled
    (``block_size=0``); the GET request is opened eagerly in ``__init__``
    and bytes are consumed sequentially from the response body.
    """

    def __init__(self, fs, url, mode="rb", loop=None, session=None, **kwargs):
        self.asynchronous = kwargs.pop("asynchronous", False)
        self.url = url
        self.loop = loop
        self.session = session
        if mode != "rb":
            raise ValueError
        # size unknown for a stream
        self.details = {"name": url, "size": None}
        super().__init__(fs=fs, path=url, mode=mode, cache_type="none", **kwargs)

        async def cor():
            # enter the response context manually; closed again in close()
            r = await self.session.get(self.fs.encode_url(url), **kwargs).__aenter__()
            self.fs._raise_not_found_for_status(r, url)
            return r

        self.r = sync(self.loop, cor)
        self.loop = fs.loop

    def seek(self, loc, whence=0):
        # only no-op seeks are allowed on a stream
        if loc == 0 and whence == 1:
            return
        if loc == self.loc and whence == 0:
            return
        raise ValueError("Cannot seek streaming HTTP file")

    async def _read(self, num=-1):
        out = await self.r.content.read(num)
        self.loc += len(out)
        return out

    read = sync_wrapper(_read)

    async def _close(self):
        self.r.close()

    def close(self):
        # schedule response close on the event loop; don't block this thread
        asyncio.run_coroutine_threadsafe(self._close(), self.loop)
        super().close()
747
+
748
+
749
class AsyncStreamFile(AbstractAsyncStreamedFile):
    """Async streaming read-only HTTP file.

    Unlike ``HTTPStreamFile``, the GET request is opened lazily on the
    first ``read()`` call, so construction never touches the network.
    """

    def __init__(
        self, fs, url, mode="rb", loop=None, session=None, size=None, **kwargs
    ):
        self.url = url
        self.session = session
        self.r = None  # response object, created on first read
        if mode != "rb":
            raise ValueError
        self.details = {"name": url, "size": None}
        self.kwargs = kwargs
        super().__init__(fs=fs, path=url, mode=mode, cache_type="none")
        self.size = size

    async def read(self, num=-1):
        if self.r is None:
            # lazily open the request on first use
            r = await self.session.get(
                self.fs.encode_url(self.url), **self.kwargs
            ).__aenter__()
            self.fs._raise_not_found_for_status(r, self.url)
            self.r = r
        out = await self.r.content.read(num)
        self.loc += len(out)
        return out

    async def close(self):
        if self.r is not None:
            self.r.close()
            self.r = None
        await super().close()
779
+
780
+
781
async def get_range(session, url, start, end, file=None, **kwargs):
    """Fetch bytes ``[start, end)`` of *url* via an explicit Range request.

    Intended for callers that already know the server honours range
    requests. If *file* is given, the bytes are written into it at offset
    ``start`` (the file must already exist); otherwise they are returned.
    """
    kwargs = kwargs.copy()
    headers = kwargs.pop("headers", {}).copy()
    # Range header is inclusive at both ends
    headers["Range"] = f"bytes={start}-{end - 1}"
    resp = await session.get(url, headers=headers, **kwargs)
    resp.raise_for_status()
    async with resp:
        data = await resp.read()
    if not file:
        return data
    with open(file, "r+b") as f:  # noqa: ASYNC101, ASYNC230
        f.seek(start)
        f.write(data)
796
+
797
+
798
async def _file_info(url, session, size_policy="head", **kwargs):
    """Call HEAD on the server to get details about the file (size/checksum etc.)

    Default operation is to explicitly allow redirects and use encoding
    'identity' (no compression) to get the true size of the target.
    """
    logger.debug("Retrieve file size for %s", url)
    kwargs = kwargs.copy()
    ar = kwargs.pop("allow_redirects", True)
    head = kwargs.get("headers", {}).copy()
    # ask the server not to compress, so Content-Length is the true size
    head["Accept-Encoding"] = "identity"
    kwargs["headers"] = head

    info = {}
    if size_policy == "head":
        r = await session.head(url, allow_redirects=ar, **kwargs)
    elif size_policy == "get":
        r = await session.get(url, allow_redirects=ar, **kwargs)
    else:
        raise TypeError(f'size_policy must be "head" or "get", got {size_policy}')
    async with r:
        r.raise_for_status()

        if "Content-Length" in r.headers:
            # Some servers may choose to ignore Accept-Encoding and return
            # compressed content, in which case the returned size is unreliable.
            if "Content-Encoding" not in r.headers or r.headers["Content-Encoding"] in [
                "identity",
                "",
            ]:
                info["size"] = int(r.headers["Content-Length"])
        elif "Content-Range" in r.headers:
            # "bytes a-b/total" -> total
            info["size"] = int(r.headers["Content-Range"].split("/")[1])

        if "Content-Type" in r.headers:
            # strip parameters such as "; charset=utf-8"
            info["mimetype"] = r.headers["Content-Type"].partition(";")[0]

        if r.headers.get("Accept-Ranges") == "none":
            # Some servers may explicitly discourage partial content requests, but
            # the lack of "Accept-Ranges" does not always indicate they would fail
            info["partial"] = False

        # final URL after any redirects
        info["url"] = str(r.url)

        for checksum_field in ["ETag", "Content-MD5", "Digest"]:
            if r.headers.get(checksum_field):
                info[checksum_field] = r.headers[checksum_field]

    return info
847
+
848
+
849
async def _file_size(url, session=None, *args, **kwargs):
    """Return the size in bytes reported for *url*, or None if unknown.

    A throwaway client session is created when none is supplied.
    """
    if session is None:
        session = await get_client()
    info = await _file_info(url, session=session, *args, **kwargs)
    return info.get("size")


# synchronous convenience wrapper around _file_size
file_size = sync_wrapper(_file_size)
mgm/lib/python3.10/site-packages/fsspec/implementations/jupyter.py ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import base64
2
+ import io
3
+ import re
4
+
5
+ import requests
6
+
7
+ import fsspec
8
+
9
+
10
class JupyterFileSystem(fsspec.AbstractFileSystem):
    """View of the files as seen by a Jupyter server (notebook or lab)"""

    protocol = ("jupyter", "jlab")

    def __init__(self, url, tok=None, **kwargs):
        """

        Parameters
        ----------
        url : str
            Base URL of the server, like "http://127.0.0.1:8888". May include
            token in the string, which is given by the process when starting up
        tok : str
            If the token is obtained separately, can be given here
        kwargs
        """
        if "?" in url:
            if tok is None:
                try:
                    tok = re.findall("token=([a-z0-9]+)", url)[0]
                except IndexError as e:
                    raise ValueError("Could not determine token") from e
            url = url.split("?", 1)[0]
        self.url = url.rstrip("/") + "/api/contents"
        self.session = requests.Session()
        if tok:
            self.session.headers["Authorization"] = f"token {tok}"

        super().__init__(**kwargs)

    def ls(self, path, detail=True, **kwargs):
        """List a directory (or stat a single file) via the Contents API."""
        path = self._strip_protocol(path)
        r = self.session.get(f"{self.url}/{path}")
        if r.status_code == 404:
            # BUG FIX: the exception was previously *returned*, not raised,
            # so callers received an exception object as the listing.
            raise FileNotFoundError(path)
        r.raise_for_status()
        out = r.json()

        if out["type"] == "directory":
            out = out["content"]
        else:
            out = [out]
        for o in out:
            # normalise to fsspec's info schema
            o["name"] = o.pop("path")
            o.pop("content")
            if o["type"] == "notebook":
                o["type"] = "file"
        if detail:
            return out
        return [o["name"] for o in out]

    def cat_file(self, path, start=None, end=None, **kwargs):
        """Return (a slice of) a file's bytes."""
        path = self._strip_protocol(path)
        r = self.session.get(f"{self.url}/{path}")
        if r.status_code == 404:
            # BUG FIX: raise instead of returning the exception object
            raise FileNotFoundError(path)
        r.raise_for_status()
        out = r.json()
        if out["format"] == "text":
            # data should be binary
            b = out["content"].encode()
        else:
            b = base64.b64decode(out["content"])
        return b[start:end]

    def pipe_file(self, path, value, **_):
        """Write ``value`` (bytes) to ``path`` in one shot."""
        path = self._strip_protocol(path)
        json = {
            "name": path.rsplit("/", 1)[-1],
            "path": path,
            "size": len(value),
            # Contents API transports binary payloads as base64 text
            "content": base64.b64encode(value).decode(),
            "format": "base64",
            "type": "file",
        }
        self.session.put(f"{self.url}/{path}", json=json)

    def mkdir(self, path, create_parents=True, **kwargs):
        """Create a directory, optionally creating missing parents first."""
        path = self._strip_protocol(path)
        if create_parents and "/" in path:
            self.mkdir(path.rsplit("/", 1)[0], True)
        json = {
            "name": path.rsplit("/", 1)[-1],
            "path": path,
            "size": None,
            "content": None,
            "type": "directory",
        }
        self.session.put(f"{self.url}/{path}", json=json)

    def _rm(self, path):
        # best-effort delete; server errors are not surfaced here
        path = self._strip_protocol(path)
        self.session.delete(f"{self.url}/{path}")

    def _open(self, path, mode="rb", **kwargs):
        """Open for reading (whole file buffered) or buffered writing."""
        path = self._strip_protocol(path)
        if mode == "rb":
            data = self.cat_file(path)
            return io.BytesIO(data)
        else:
            return SimpleFileWriter(self, path, mode="wb")
112
+
113
+
114
class SimpleFileWriter(fsspec.spec.AbstractBufferedFile):
    """Buffered writer that uploads the whole file in one request on close."""

    def _upload_chunk(self, final=False):
        """Never uploads a chunk until file is done

        Not suitable for large files
        """
        if final is False:
            # keep buffering; nothing sent yet
            return False
        self.buffer.seek(0)
        data = self.buffer.read()
        self.fs.pipe_file(self.path, data)
mgm/lib/python3.10/site-packages/fsspec/implementations/libarchive.py ADDED
@@ -0,0 +1,213 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from contextlib import contextmanager
2
+ from ctypes import (
3
+ CFUNCTYPE,
4
+ POINTER,
5
+ c_int,
6
+ c_longlong,
7
+ c_void_p,
8
+ cast,
9
+ create_string_buffer,
10
+ )
11
+
12
+ import libarchive
13
+ import libarchive.ffi as ffi
14
+
15
+ from fsspec import open_files
16
+ from fsspec.archive import AbstractArchiveFileSystem
17
+ from fsspec.implementations.memory import MemoryFile
18
+ from fsspec.utils import DEFAULT_BLOCK_SIZE
19
+
20
+ # Libarchive requires seekable files or memory only for certain archive
21
+ # types. However, since we read the directory first to cache the contents
22
+ # and also allow random access to any file, the file-like object needs
23
+ # to be seekable no matter what.
24
+
25
+ # Seek call-backs (not provided in the libarchive python wrapper)
26
+ SEEK_CALLBACK = CFUNCTYPE(c_longlong, c_int, c_void_p, c_longlong, c_int)
27
+ read_set_seek_callback = ffi.ffi(
28
+ "read_set_seek_callback", [ffi.c_archive_p, SEEK_CALLBACK], c_int, ffi.check_int
29
+ )
30
+ new_api = hasattr(ffi, "NO_OPEN_CB")
31
+
32
+
33
+ @contextmanager
34
+ def custom_reader(file, format_name="all", filter_name="all", block_size=ffi.page_size):
35
+ """Read an archive from a seekable file-like object.
36
+
37
+ The `file` object must support the standard `readinto` and 'seek' methods.
38
+ """
39
+ buf = create_string_buffer(block_size)
40
+ buf_p = cast(buf, c_void_p)
41
+
42
+ def read_func(archive_p, context, ptrptr):
43
+ # readinto the buffer, returns number of bytes read
44
+ length = file.readinto(buf)
45
+ # write the address of the buffer into the pointer
46
+ ptrptr = cast(ptrptr, POINTER(c_void_p))
47
+ ptrptr[0] = buf_p
48
+ # tell libarchive how much data was written into the buffer
49
+ return length
50
+
51
+ def seek_func(archive_p, context, offset, whence):
52
+ file.seek(offset, whence)
53
+ # tell libarchvie the current position
54
+ return file.tell()
55
+
56
+ read_cb = ffi.READ_CALLBACK(read_func)
57
+ seek_cb = SEEK_CALLBACK(seek_func)
58
+
59
+ if new_api:
60
+ open_cb = ffi.NO_OPEN_CB
61
+ close_cb = ffi.NO_CLOSE_CB
62
+ else:
63
+ open_cb = libarchive.read.OPEN_CALLBACK(ffi.VOID_CB)
64
+ close_cb = libarchive.read.CLOSE_CALLBACK(ffi.VOID_CB)
65
+
66
+ with libarchive.read.new_archive_read(format_name, filter_name) as archive_p:
67
+ read_set_seek_callback(archive_p, seek_cb)
68
+ ffi.read_open(archive_p, None, open_cb, read_cb, close_cb)
69
+ yield libarchive.read.ArchiveRead(archive_p)
70
+
71
+
72
+ class LibArchiveFileSystem(AbstractArchiveFileSystem):
73
+ """Compressed archives as a file-system (read-only)
74
+
75
+ Supports the following formats:
76
+ tar, pax , cpio, ISO9660, zip, mtree, shar, ar, raw, xar, lha/lzh, rar
77
+ Microsoft CAB, 7-Zip, WARC
78
+
79
+ See the libarchive documentation for further restrictions.
80
+ https://www.libarchive.org/
81
+
82
+ Keeps file object open while instance lives. It only works in seekable
83
+ file-like objects. In case the filesystem does not support this kind of
84
+ file object, it is recommended to cache locally.
85
+
86
+ This class is pickleable, but not necessarily thread-safe (depends on the
87
+ platform). See libarchive documentation for details.
88
+ """
89
+
90
+ root_marker = ""
91
+ protocol = "libarchive"
92
+ cachable = False
93
+
94
+ def __init__(
95
+ self,
96
+ fo="",
97
+ mode="r",
98
+ target_protocol=None,
99
+ target_options=None,
100
+ block_size=DEFAULT_BLOCK_SIZE,
101
+ **kwargs,
102
+ ):
103
+ """
104
+ Parameters
105
+ ----------
106
+ fo: str or file-like
107
+ Contains ZIP, and must exist. If a str, will fetch file using
108
+ :meth:`~fsspec.open_files`, which must return one file exactly.
109
+ mode: str
110
+ Currently, only 'r' accepted
111
+ target_protocol: str (optional)
112
+ If ``fo`` is a string, this value can be used to override the
113
+ FS protocol inferred from a URL
114
+ target_options: dict (optional)
115
+ Kwargs passed when instantiating the target FS, if ``fo`` is
116
+ a string.
117
+ """
118
+ super().__init__(self, **kwargs)
119
+ if mode != "r":
120
+ raise ValueError("Only read from archive files accepted")
121
+ if isinstance(fo, str):
122
+ files = open_files(fo, protocol=target_protocol, **(target_options or {}))
123
+ if len(files) != 1:
124
+ raise ValueError(
125
+ f'Path "{fo}" did not resolve to exactly one file: "{files}"'
126
+ )
127
+ fo = files[0]
128
+ self.of = fo
129
+ self.fo = fo.__enter__() # the whole instance is a context
130
+ self.block_size = block_size
131
+ self.dir_cache = None
132
+
133
+ @contextmanager
134
+ def _open_archive(self):
135
+ self.fo.seek(0)
136
+ with custom_reader(self.fo, block_size=self.block_size) as arc:
137
+ yield arc
138
+
139
+ @classmethod
140
+ def _strip_protocol(cls, path):
141
+ # file paths are always relative to the archive root
142
+ return super()._strip_protocol(path).lstrip("/")
143
+
144
+ def _get_dirs(self):
145
+ fields = {
146
+ "name": "pathname",
147
+ "size": "size",
148
+ "created": "ctime",
149
+ "mode": "mode",
150
+ "uid": "uid",
151
+ "gid": "gid",
152
+ "mtime": "mtime",
153
+ }
154
+
155
+ if self.dir_cache is not None:
156
+ return
157
+
158
+ self.dir_cache = {}
159
+ list_names = []
160
+ with self._open_archive() as arc:
161
+ for entry in arc:
162
+ if not entry.isdir and not entry.isfile:
163
+ # Skip symbolic links, fifo entries, etc.
164
+ continue
165
+ self.dir_cache.update(
166
+ {
167
+ dirname: {"name": dirname, "size": 0, "type": "directory"}
168
+ for dirname in self._all_dirnames(set(entry.name))
169
+ }
170
+ )
171
+ f = {key: getattr(entry, fields[key]) for key in fields}
172
+ f["type"] = "directory" if entry.isdir else "file"
173
+ list_names.append(entry.name)
174
+
175
+ self.dir_cache[f["name"]] = f
176
+ # libarchive does not seem to return an entry for the directories (at least
177
+ # not in all formats), so get the directories names from the files names
178
+ self.dir_cache.update(
179
+ {
180
+ dirname: {"name": dirname, "size": 0, "type": "directory"}
181
+ for dirname in self._all_dirnames(list_names)
182
+ }
183
+ )
184
+
185
+ def _open(
186
+ self,
187
+ path,
188
+ mode="rb",
189
+ block_size=None,
190
+ autocommit=True,
191
+ cache_options=None,
192
+ **kwargs,
193
+ ):
194
+ path = self._strip_protocol(path)
195
+ if mode != "rb":
196
+ raise NotImplementedError
197
+
198
+ data = bytes()
199
+ with self._open_archive() as arc:
200
+ for entry in arc:
201
+ if entry.pathname != path:
202
+ continue
203
+
204
+ if entry.size == 0:
205
+ # empty file, so there are no blocks
206
+ break
207
+
208
+ for block in entry.get_blocks(entry.size):
209
+ data = block
210
+ break
211
+ else:
212
+ raise ValueError
213
+ return MemoryFile(fs=self, path=path, data=data)
mgm/lib/python3.10/site-packages/fsspec/implementations/local.py ADDED
@@ -0,0 +1,476 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import datetime
2
+ import io
3
+ import logging
4
+ import os
5
+ import os.path as osp
6
+ import shutil
7
+ import stat
8
+ import tempfile
9
+
10
+ from fsspec import AbstractFileSystem
11
+ from fsspec.compression import compr
12
+ from fsspec.core import get_compression
13
+ from fsspec.utils import isfilelike, stringify_path
14
+
15
+ logger = logging.getLogger("fsspec.local")
16
+
17
+
18
+ class LocalFileSystem(AbstractFileSystem):
19
+ """Interface to files on local storage
20
+
21
+ Parameters
22
+ ----------
23
+ auto_mkdir: bool
24
+ Whether, when opening a file, the directory containing it should
25
+ be created (if it doesn't already exist). This is assumed by pyarrow
26
+ code.
27
+ """
28
+
29
+ root_marker = "/"
30
+ protocol = "file", "local"
31
+ local_file = True
32
+
33
+ def __init__(self, auto_mkdir=False, **kwargs):
34
+ super().__init__(**kwargs)
35
+ self.auto_mkdir = auto_mkdir
36
+
37
+ @property
38
+ def fsid(self):
39
+ return "local"
40
+
41
+ def mkdir(self, path, create_parents=True, **kwargs):
42
+ path = self._strip_protocol(path)
43
+ if self.exists(path):
44
+ raise FileExistsError(path)
45
+ if create_parents:
46
+ self.makedirs(path, exist_ok=True)
47
+ else:
48
+ os.mkdir(path, **kwargs)
49
+
50
+ def makedirs(self, path, exist_ok=False):
51
+ path = self._strip_protocol(path)
52
+ os.makedirs(path, exist_ok=exist_ok)
53
+
54
+ def rmdir(self, path):
55
+ path = self._strip_protocol(path)
56
+ os.rmdir(path)
57
+
58
+ def ls(self, path, detail=False, **kwargs):
59
+ path = self._strip_protocol(path)
60
+ info = self.info(path)
61
+ if info["type"] == "directory":
62
+ with os.scandir(path) as it:
63
+ infos = []
64
+ for f in it:
65
+ try:
66
+ infos.append(self.info(f))
67
+ except FileNotFoundError:
68
+ pass
69
+ else:
70
+ infos = [info]
71
+
72
+ if not detail:
73
+ return [i["name"] for i in infos]
74
+ return infos
75
+
76
+ def info(self, path, **kwargs):
77
+ if isinstance(path, os.DirEntry):
78
+ # scandir DirEntry
79
+ out = path.stat(follow_symlinks=False)
80
+ link = path.is_symlink()
81
+ if path.is_dir(follow_symlinks=False):
82
+ t = "directory"
83
+ elif path.is_file(follow_symlinks=False):
84
+ t = "file"
85
+ else:
86
+ t = "other"
87
+
88
+ size = out.st_size
89
+ if link:
90
+ try:
91
+ out2 = path.stat(follow_symlinks=True)
92
+ size = out2.st_size
93
+ except OSError:
94
+ size = 0
95
+ path = self._strip_protocol(path.path)
96
+ else:
97
+ # str or path-like
98
+ path = self._strip_protocol(path)
99
+ out = os.stat(path, follow_symlinks=False)
100
+ link = stat.S_ISLNK(out.st_mode)
101
+ if link:
102
+ out = os.stat(path, follow_symlinks=True)
103
+ size = out.st_size
104
+ if stat.S_ISDIR(out.st_mode):
105
+ t = "directory"
106
+ elif stat.S_ISREG(out.st_mode):
107
+ t = "file"
108
+ else:
109
+ t = "other"
110
+ result = {
111
+ "name": path,
112
+ "size": size,
113
+ "type": t,
114
+ "created": out.st_ctime,
115
+ "islink": link,
116
+ }
117
+ for field in ["mode", "uid", "gid", "mtime", "ino", "nlink"]:
118
+ result[field] = getattr(out, f"st_{field}")
119
+ if link:
120
+ result["destination"] = os.readlink(path)
121
+ return result
122
+
123
+ def lexists(self, path, **kwargs):
124
+ return osp.lexists(path)
125
+
126
+ def cp_file(self, path1, path2, **kwargs):
127
+ path1 = self._strip_protocol(path1)
128
+ path2 = self._strip_protocol(path2)
129
+ if self.auto_mkdir:
130
+ self.makedirs(self._parent(path2), exist_ok=True)
131
+ if self.isfile(path1):
132
+ shutil.copyfile(path1, path2)
133
+ elif self.isdir(path1):
134
+ self.mkdirs(path2, exist_ok=True)
135
+ else:
136
+ raise FileNotFoundError(path1)
137
+
138
+ def isfile(self, path):
139
+ path = self._strip_protocol(path)
140
+ return os.path.isfile(path)
141
+
142
+ def isdir(self, path):
143
+ path = self._strip_protocol(path)
144
+ return os.path.isdir(path)
145
+
146
+ def get_file(self, path1, path2, callback=None, **kwargs):
147
+ if isfilelike(path2):
148
+ with open(path1, "rb") as f:
149
+ shutil.copyfileobj(f, path2)
150
+ else:
151
+ return self.cp_file(path1, path2, **kwargs)
152
+
153
+ def put_file(self, path1, path2, callback=None, **kwargs):
154
+ return self.cp_file(path1, path2, **kwargs)
155
+
156
+ def mv(self, path1, path2, **kwargs):
157
+ path1 = self._strip_protocol(path1)
158
+ path2 = self._strip_protocol(path2)
159
+ shutil.move(path1, path2)
160
+
161
+ def link(self, src, dst, **kwargs):
162
+ src = self._strip_protocol(src)
163
+ dst = self._strip_protocol(dst)
164
+ os.link(src, dst, **kwargs)
165
+
166
+ def symlink(self, src, dst, **kwargs):
167
+ src = self._strip_protocol(src)
168
+ dst = self._strip_protocol(dst)
169
+ os.symlink(src, dst, **kwargs)
170
+
171
+ def islink(self, path) -> bool:
172
+ return os.path.islink(self._strip_protocol(path))
173
+
174
+ def rm_file(self, path):
175
+ os.remove(self._strip_protocol(path))
176
+
177
+ def rm(self, path, recursive=False, maxdepth=None):
178
+ if not isinstance(path, list):
179
+ path = [path]
180
+
181
+ for p in path:
182
+ p = self._strip_protocol(p)
183
+ if self.isdir(p):
184
+ if not recursive:
185
+ raise ValueError("Cannot delete directory, set recursive=True")
186
+ if osp.abspath(p) == os.getcwd():
187
+ raise ValueError("Cannot delete current working directory")
188
+ shutil.rmtree(p)
189
+ else:
190
+ os.remove(p)
191
+
192
+ def unstrip_protocol(self, name):
193
+ name = self._strip_protocol(name) # normalise for local/win/...
194
+ return f"file://{name}"
195
+
196
+ def _open(self, path, mode="rb", block_size=None, **kwargs):
197
+ path = self._strip_protocol(path)
198
+ if self.auto_mkdir and "w" in mode:
199
+ self.makedirs(self._parent(path), exist_ok=True)
200
+ return LocalFileOpener(path, mode, fs=self, **kwargs)
201
+
202
+ def touch(self, path, truncate=True, **kwargs):
203
+ path = self._strip_protocol(path)
204
+ if self.auto_mkdir:
205
+ self.makedirs(self._parent(path), exist_ok=True)
206
+ if self.exists(path):
207
+ os.utime(path, None)
208
+ else:
209
+ open(path, "a").close()
210
+ if truncate:
211
+ os.truncate(path, 0)
212
+
213
+ def created(self, path):
214
+ info = self.info(path=path)
215
+ return datetime.datetime.fromtimestamp(
216
+ info["created"], tz=datetime.timezone.utc
217
+ )
218
+
219
+ def modified(self, path):
220
+ info = self.info(path=path)
221
+ return datetime.datetime.fromtimestamp(info["mtime"], tz=datetime.timezone.utc)
222
+
223
+ @classmethod
224
+ def _parent(cls, path):
225
+ path = cls._strip_protocol(path)
226
+ if os.sep == "/":
227
+ # posix native
228
+ return path.rsplit("/", 1)[0] or "/"
229
+ else:
230
+ # NT
231
+ path_ = path.rsplit("/", 1)[0]
232
+ if len(path_) <= 3:
233
+ if path_[1:2] == ":":
234
+ # nt root (something like c:/)
235
+ return path_[0] + ":/"
236
+ # More cases may be required here
237
+ return path_
238
+
239
+ @classmethod
240
+ def _strip_protocol(cls, path):
241
+ path = stringify_path(path)
242
+ if path.startswith("file://"):
243
+ path = path[7:]
244
+ elif path.startswith("file:"):
245
+ path = path[5:]
246
+ elif path.startswith("local://"):
247
+ path = path[8:]
248
+ elif path.startswith("local:"):
249
+ path = path[6:]
250
+
251
+ path = make_path_posix(path)
252
+ if os.sep != "/":
253
+ # This code-path is a stripped down version of
254
+ # > drive, path = ntpath.splitdrive(path)
255
+ if path[1:2] == ":":
256
+ # Absolute drive-letter path, e.g. X:\Windows
257
+ # Relative path with drive, e.g. X:Windows
258
+ drive, path = path[:2], path[2:]
259
+ elif path[:2] == "//":
260
+ # UNC drives, e.g. \\server\share or \\?\UNC\server\share
261
+ # Device drives, e.g. \\.\device or \\?\device
262
+ if (index1 := path.find("/", 2)) == -1 or (
263
+ index2 := path.find("/", index1 + 1)
264
+ ) == -1:
265
+ drive, path = path, ""
266
+ else:
267
+ drive, path = path[:index2], path[index2:]
268
+ else:
269
+ # Relative path, e.g. Windows
270
+ drive = ""
271
+
272
+ path = path.rstrip("/") or cls.root_marker
273
+ return drive + path
274
+
275
+ else:
276
+ return path.rstrip("/") or cls.root_marker
277
+
278
+ def _isfilestore(self):
279
+ # Inheriting from DaskFileSystem makes this False (S3, etc. were)
280
+ # the original motivation. But we are a posix-like file system.
281
+ # See https://github.com/dask/dask/issues/5526
282
+ return True
283
+
284
+ def chmod(self, path, mode):
285
+ path = stringify_path(path)
286
+ return os.chmod(path, mode)
287
+
288
+
289
+ def make_path_posix(path):
290
+ """Make path generic and absolute for current OS"""
291
+ if not isinstance(path, str):
292
+ if isinstance(path, (list, set, tuple)):
293
+ return type(path)(make_path_posix(p) for p in path)
294
+ else:
295
+ path = stringify_path(path)
296
+ if not isinstance(path, str):
297
+ raise TypeError(f"could not convert {path!r} to string")
298
+ if os.sep == "/":
299
+ # Native posix
300
+ if path.startswith("/"):
301
+ # most common fast case for posix
302
+ return path
303
+ elif path.startswith("~"):
304
+ return osp.expanduser(path)
305
+ elif path.startswith("./"):
306
+ path = path[2:]
307
+ elif path == ".":
308
+ path = ""
309
+ return f"{os.getcwd()}/{path}"
310
+ else:
311
+ # NT handling
312
+ if path[0:1] == "/" and path[2:3] == ":":
313
+ # path is like "/c:/local/path"
314
+ path = path[1:]
315
+ if path[1:2] == ":":
316
+ # windows full path like "C:\\local\\path"
317
+ if len(path) <= 3:
318
+ # nt root (something like c:/)
319
+ return path[0] + ":/"
320
+ path = path.replace("\\", "/")
321
+ return path
322
+ elif path[0:1] == "~":
323
+ return make_path_posix(osp.expanduser(path))
324
+ elif path.startswith(("\\\\", "//")):
325
+ # windows UNC/DFS-style paths
326
+ return "//" + path[2:].replace("\\", "/")
327
+ elif path.startswith(("\\", "/")):
328
+ # windows relative path with root
329
+ path = path.replace("\\", "/")
330
+ return f"{osp.splitdrive(os.getcwd())[0]}{path}"
331
+ else:
332
+ path = path.replace("\\", "/")
333
+ if path.startswith("./"):
334
+ path = path[2:]
335
+ elif path == ".":
336
+ path = ""
337
+ return f"{make_path_posix(os.getcwd())}/{path}"
338
+
339
+
340
+ def trailing_sep(path):
341
+ """Return True if the path ends with a path separator.
342
+
343
+ A forward slash is always considered a path separator, even on Operating
344
+ Systems that normally use a backslash.
345
+ """
346
+ # TODO: if all incoming paths were posix-compliant then separator would
347
+ # always be a forward slash, simplifying this function.
348
+ # See https://github.com/fsspec/filesystem_spec/pull/1250
349
+ return path.endswith(os.sep) or (os.altsep is not None and path.endswith(os.altsep))
350
+
351
+
352
+ class LocalFileOpener(io.IOBase):
353
+ def __init__(
354
+ self, path, mode, autocommit=True, fs=None, compression=None, **kwargs
355
+ ):
356
+ logger.debug("open file: %s", path)
357
+ self.path = path
358
+ self.mode = mode
359
+ self.fs = fs
360
+ self.f = None
361
+ self.autocommit = autocommit
362
+ self.compression = get_compression(path, compression)
363
+ self.blocksize = io.DEFAULT_BUFFER_SIZE
364
+ self._open()
365
+
366
+ def _open(self):
367
+ if self.f is None or self.f.closed:
368
+ if self.autocommit or "w" not in self.mode:
369
+ self.f = open(self.path, mode=self.mode)
370
+ if self.compression:
371
+ compress = compr[self.compression]
372
+ self.f = compress(self.f, mode=self.mode)
373
+ else:
374
+ # TODO: check if path is writable?
375
+ i, name = tempfile.mkstemp()
376
+ os.close(i) # we want normal open and normal buffered file
377
+ self.temp = name
378
+ self.f = open(name, mode=self.mode)
379
+ if "w" not in self.mode:
380
+ self.size = self.f.seek(0, 2)
381
+ self.f.seek(0)
382
+ self.f.size = self.size
383
+
384
+ def _fetch_range(self, start, end):
385
+ # probably only used by cached FS
386
+ if "r" not in self.mode:
387
+ raise ValueError
388
+ self._open()
389
+ self.f.seek(start)
390
+ return self.f.read(end - start)
391
+
392
+ def __setstate__(self, state):
393
+ self.f = None
394
+ loc = state.pop("loc", None)
395
+ self.__dict__.update(state)
396
+ if "r" in state["mode"]:
397
+ self.f = None
398
+ self._open()
399
+ self.f.seek(loc)
400
+
401
+ def __getstate__(self):
402
+ d = self.__dict__.copy()
403
+ d.pop("f")
404
+ if "r" in self.mode:
405
+ d["loc"] = self.f.tell()
406
+ else:
407
+ if not self.f.closed:
408
+ raise ValueError("Cannot serialise open write-mode local file")
409
+ return d
410
+
411
+ def commit(self):
412
+ if self.autocommit:
413
+ raise RuntimeError("Can only commit if not already set to autocommit")
414
+ shutil.move(self.temp, self.path)
415
+
416
+ def discard(self):
417
+ if self.autocommit:
418
+ raise RuntimeError("Cannot discard if set to autocommit")
419
+ os.remove(self.temp)
420
+
421
+ def readable(self) -> bool:
422
+ return True
423
+
424
+ def writable(self) -> bool:
425
+ return "r" not in self.mode
426
+
427
+ def read(self, *args, **kwargs):
428
+ return self.f.read(*args, **kwargs)
429
+
430
+ def write(self, *args, **kwargs):
431
+ return self.f.write(*args, **kwargs)
432
+
433
+ def tell(self, *args, **kwargs):
434
+ return self.f.tell(*args, **kwargs)
435
+
436
+ def seek(self, *args, **kwargs):
437
+ return self.f.seek(*args, **kwargs)
438
+
439
+ def seekable(self, *args, **kwargs):
440
+ return self.f.seekable(*args, **kwargs)
441
+
442
+ def readline(self, *args, **kwargs):
443
+ return self.f.readline(*args, **kwargs)
444
+
445
+ def readlines(self, *args, **kwargs):
446
+ return self.f.readlines(*args, **kwargs)
447
+
448
+ def close(self):
449
+ return self.f.close()
450
+
451
+ def truncate(self, size=None) -> int:
452
+ return self.f.truncate(size)
453
+
454
+ @property
455
+ def closed(self):
456
+ return self.f.closed
457
+
458
+ def fileno(self):
459
+ return self.raw.fileno()
460
+
461
+ def flush(self) -> None:
462
+ self.f.flush()
463
+
464
+ def __iter__(self):
465
+ return self.f.__iter__()
466
+
467
+ def __getattr__(self, item):
468
+ return getattr(self.f, item)
469
+
470
+ def __enter__(self):
471
+ self._incontext = True
472
+ return self
473
+
474
+ def __exit__(self, exc_type, exc_value, traceback):
475
+ self._incontext = False
476
+ self.f.__exit__(exc_type, exc_value, traceback)
mgm/lib/python3.10/site-packages/fsspec/implementations/memory.py ADDED
@@ -0,0 +1,312 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import logging
4
+ from datetime import datetime, timezone
5
+ from errno import ENOTEMPTY
6
+ from io import BytesIO
7
+ from pathlib import PurePath, PureWindowsPath
8
+ from typing import Any, ClassVar
9
+
10
+ from fsspec import AbstractFileSystem
11
+ from fsspec.implementations.local import LocalFileSystem
12
+ from fsspec.utils import stringify_path
13
+
14
+ logger = logging.getLogger("fsspec.memoryfs")
15
+
16
+
17
+ class MemoryFileSystem(AbstractFileSystem):
18
+ """A filesystem based on a dict of BytesIO objects
19
+
20
+ This is a global filesystem so instances of this class all point to the same
21
+ in memory filesystem.
22
+ """
23
+
24
+ store: ClassVar[dict[str, Any]] = {} # global, do not overwrite!
25
+ pseudo_dirs = [""] # global, do not overwrite!
26
+ protocol = "memory"
27
+ root_marker = "/"
28
+
29
+ @classmethod
30
+ def _strip_protocol(cls, path):
31
+ if isinstance(path, PurePath):
32
+ if isinstance(path, PureWindowsPath):
33
+ return LocalFileSystem._strip_protocol(path)
34
+ else:
35
+ path = stringify_path(path)
36
+
37
+ if path.startswith("memory://"):
38
+ path = path[len("memory://") :]
39
+ if "::" in path or "://" in path:
40
+ return path.rstrip("/")
41
+ path = path.lstrip("/").rstrip("/")
42
+ return "/" + path if path else ""
43
+
44
+ def ls(self, path, detail=True, **kwargs):
45
+ path = self._strip_protocol(path)
46
+ if path in self.store:
47
+ # there is a key with this exact name
48
+ if not detail:
49
+ return [path]
50
+ return [
51
+ {
52
+ "name": path,
53
+ "size": self.store[path].size,
54
+ "type": "file",
55
+ "created": self.store[path].created.timestamp(),
56
+ }
57
+ ]
58
+ paths = set()
59
+ starter = path + "/"
60
+ out = []
61
+ for p2 in tuple(self.store):
62
+ if p2.startswith(starter):
63
+ if "/" not in p2[len(starter) :]:
64
+ # exact child
65
+ out.append(
66
+ {
67
+ "name": p2,
68
+ "size": self.store[p2].size,
69
+ "type": "file",
70
+ "created": self.store[p2].created.timestamp(),
71
+ }
72
+ )
73
+ elif len(p2) > len(starter):
74
+ # implied child directory
75
+ ppath = starter + p2[len(starter) :].split("/", 1)[0]
76
+ if ppath not in paths:
77
+ out = out or []
78
+ out.append(
79
+ {
80
+ "name": ppath,
81
+ "size": 0,
82
+ "type": "directory",
83
+ }
84
+ )
85
+ paths.add(ppath)
86
+ for p2 in self.pseudo_dirs:
87
+ if p2.startswith(starter):
88
+ if "/" not in p2[len(starter) :]:
89
+ # exact child pdir
90
+ if p2 not in paths:
91
+ out.append({"name": p2, "size": 0, "type": "directory"})
92
+ paths.add(p2)
93
+ else:
94
+ # directory implied by deeper pdir
95
+ ppath = starter + p2[len(starter) :].split("/", 1)[0]
96
+ if ppath not in paths:
97
+ out.append({"name": ppath, "size": 0, "type": "directory"})
98
+ paths.add(ppath)
99
+ if not out:
100
+ if path in self.pseudo_dirs:
101
+ # empty dir
102
+ return []
103
+ raise FileNotFoundError(path)
104
+ if detail:
105
+ return out
106
+ return sorted([f["name"] for f in out])
107
+
108
+ def mkdir(self, path, create_parents=True, **kwargs):
109
+ path = self._strip_protocol(path)
110
+ if path in self.store or path in self.pseudo_dirs:
111
+ raise FileExistsError(path)
112
+ if self._parent(path).strip("/") and self.isfile(self._parent(path)):
113
+ raise NotADirectoryError(self._parent(path))
114
+ if create_parents and self._parent(path).strip("/"):
115
+ try:
116
+ self.mkdir(self._parent(path), create_parents, **kwargs)
117
+ except FileExistsError:
118
+ pass
119
+ if path and path not in self.pseudo_dirs:
120
+ self.pseudo_dirs.append(path)
121
+
122
+ def makedirs(self, path, exist_ok=False):
123
+ try:
124
+ self.mkdir(path, create_parents=True)
125
+ except FileExistsError:
126
+ if not exist_ok:
127
+ raise
128
+
129
+ def pipe_file(self, path, value, mode="overwrite", **kwargs):
130
+ """Set the bytes of given file
131
+
132
+ Avoids copies of the data if possible
133
+ """
134
+ mode = "xb" if mode == "create" else "wb"
135
+ self.open(path, mode=mode, data=value)
136
+
137
+ def rmdir(self, path):
138
+ path = self._strip_protocol(path)
139
+ if path == "":
140
+ # silently avoid deleting FS root
141
+ return
142
+ if path in self.pseudo_dirs:
143
+ if not self.ls(path):
144
+ self.pseudo_dirs.remove(path)
145
+ else:
146
+ raise OSError(ENOTEMPTY, "Directory not empty", path)
147
+ else:
148
+ raise FileNotFoundError(path)
149
+
150
+ def info(self, path, **kwargs):
151
+ logger.debug("info: %s", path)
152
+ path = self._strip_protocol(path)
153
+ if path in self.pseudo_dirs or any(
154
+ p.startswith(path + "/") for p in list(self.store) + self.pseudo_dirs
155
+ ):
156
+ return {
157
+ "name": path,
158
+ "size": 0,
159
+ "type": "directory",
160
+ }
161
+ elif path in self.store:
162
+ filelike = self.store[path]
163
+ return {
164
+ "name": path,
165
+ "size": filelike.size,
166
+ "type": "file",
167
+ "created": getattr(filelike, "created", None),
168
+ }
169
+ else:
170
+ raise FileNotFoundError(path)
171
+
172
+ def _open(
173
+ self,
174
+ path,
175
+ mode="rb",
176
+ block_size=None,
177
+ autocommit=True,
178
+ cache_options=None,
179
+ **kwargs,
180
+ ):
181
+ path = self._strip_protocol(path)
182
+ if "x" in mode and self.exists(path):
183
+ raise FileExistsError
184
+ if path in self.pseudo_dirs:
185
+ raise IsADirectoryError(path)
186
+ parent = path
187
+ while len(parent) > 1:
188
+ parent = self._parent(parent)
189
+ if self.isfile(parent):
190
+ raise FileExistsError(parent)
191
+ if mode in ["rb", "ab", "r+b"]:
192
+ if path in self.store:
193
+ f = self.store[path]
194
+ if mode == "ab":
195
+ # position at the end of file
196
+ f.seek(0, 2)
197
+ else:
198
+ # position at the beginning of file
199
+ f.seek(0)
200
+ return f
201
+ else:
202
+ raise FileNotFoundError(path)
203
+ elif mode in {"wb", "xb"}:
204
+ if mode == "xb" and self.exists(path):
205
+ raise FileExistsError
206
+ m = MemoryFile(self, path, kwargs.get("data"))
207
+ if not self._intrans:
208
+ m.commit()
209
+ return m
210
+ else:
211
+ name = self.__class__.__name__
212
+ raise ValueError(f"unsupported file mode for {name}: {mode!r}")
213
+
214
+ def cp_file(self, path1, path2, **kwargs):
215
+ path1 = self._strip_protocol(path1)
216
+ path2 = self._strip_protocol(path2)
217
+ if self.isfile(path1):
218
+ self.store[path2] = MemoryFile(
219
+ self, path2, self.store[path1].getvalue()
220
+ ) # implicit copy
221
+ elif self.isdir(path1):
222
+ if path2 not in self.pseudo_dirs:
223
+ self.pseudo_dirs.append(path2)
224
+ else:
225
+ raise FileNotFoundError(path1)
226
+
227
+ def cat_file(self, path, start=None, end=None, **kwargs):
228
+ logger.debug("cat: %s", path)
229
+ path = self._strip_protocol(path)
230
+ try:
231
+ return bytes(self.store[path].getbuffer()[start:end])
232
+ except KeyError as e:
233
+ raise FileNotFoundError(path) from e
234
+
235
+ def _rm(self, path):
236
+ path = self._strip_protocol(path)
237
+ try:
238
+ del self.store[path]
239
+ except KeyError as e:
240
+ raise FileNotFoundError(path) from e
241
+
242
+ def modified(self, path):
243
+ path = self._strip_protocol(path)
244
+ try:
245
+ return self.store[path].modified
246
+ except KeyError as e:
247
+ raise FileNotFoundError(path) from e
248
+
249
+ def created(self, path):
250
+ path = self._strip_protocol(path)
251
+ try:
252
+ return self.store[path].created
253
+ except KeyError as e:
254
+ raise FileNotFoundError(path) from e
255
+
256
+ def isfile(self, path):
257
+ path = self._strip_protocol(path)
258
+ return path in self.store
259
+
260
+ def rm(self, path, recursive=False, maxdepth=None):
261
+ if isinstance(path, str):
262
+ path = self._strip_protocol(path)
263
+ else:
264
+ path = [self._strip_protocol(p) for p in path]
265
+ paths = self.expand_path(path, recursive=recursive, maxdepth=maxdepth)
266
+ for p in reversed(paths):
267
+ if self.isfile(p):
268
+ self.rm_file(p)
269
+ # If the expanded path doesn't exist, it is only because the expanded
270
+ # path was a directory that does not exist in self.pseudo_dirs. This
271
+ # is possible if you directly create files without making the
272
+ # directories first.
273
+ elif not self.exists(p):
274
+ continue
275
+ else:
276
+ self.rmdir(p)
277
+
278
+
279
+ class MemoryFile(BytesIO):
280
+ """A BytesIO which can't close and works as a context manager
281
+
282
+ Can initialise with data. Each path should only be active once at any moment.
283
+
284
+ No need to provide fs, path if auto-committing (default)
285
+ """
286
+
287
+ def __init__(self, fs=None, path=None, data=None):
288
+ logger.debug("open file %s", path)
289
+ self.fs = fs
290
+ self.path = path
291
+ self.created = datetime.now(tz=timezone.utc)
292
+ self.modified = datetime.now(tz=timezone.utc)
293
+ if data:
294
+ super().__init__(data)
295
+ self.seek(0)
296
+
297
+ @property
298
+ def size(self):
299
+ return self.getbuffer().nbytes
300
+
301
+ def __enter__(self):
302
+ return self
303
+
304
+ def close(self):
305
+ pass
306
+
307
+ def discard(self):
308
+ pass
309
+
310
+ def commit(self):
311
+ self.fs.store[self.path] = self
312
+ self.modified = datetime.now(tz=timezone.utc)
mgm/lib/python3.10/site-packages/fsspec/implementations/reference.py ADDED
@@ -0,0 +1,1216 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import base64
2
+ import collections
3
+ import io
4
+ import itertools
5
+ import logging
6
+ import math
7
+ import os
8
+ from functools import lru_cache
9
+ from itertools import chain
10
+ from typing import TYPE_CHECKING, Literal
11
+
12
+ import fsspec.core
13
+
14
+ try:
15
+ import ujson as json
16
+ except ImportError:
17
+ if not TYPE_CHECKING:
18
+ import json
19
+
20
+ from fsspec.asyn import AsyncFileSystem
21
+ from fsspec.callbacks import DEFAULT_CALLBACK
22
+ from fsspec.core import filesystem, open, split_protocol
23
+ from fsspec.implementations.asyn_wrapper import AsyncFileSystemWrapper
24
+ from fsspec.utils import isfilelike, merge_offset_ranges, other_paths
25
+
26
+ logger = logging.getLogger("fsspec.reference")
27
+
28
+
29
+ class ReferenceNotReachable(RuntimeError):
30
+ def __init__(self, reference, target, *args):
31
+ super().__init__(*args)
32
+ self.reference = reference
33
+ self.target = target
34
+
35
+ def __str__(self):
36
+ return f'Reference "{self.reference}" failed to fetch target {self.target}'
37
+
38
+
39
+ def _first(d):
40
+ return next(iter(d.values()))
41
+
42
+
43
+ def _prot_in_references(path, references):
44
+ ref = references.get(path)
45
+ if isinstance(ref, (list, tuple)) and isinstance(ref[0], str):
46
+ return split_protocol(ref[0])[0] if ref[0] else ref[0]
47
+
48
+
49
+ def _protocol_groups(paths, references):
50
+ if isinstance(paths, str):
51
+ return {_prot_in_references(paths, references): [paths]}
52
+ out = {}
53
+ for path in paths:
54
+ protocol = _prot_in_references(path, references)
55
+ out.setdefault(protocol, []).append(path)
56
+ return out
57
+
58
+
59
+ class RefsValuesView(collections.abc.ValuesView):
60
+ def __iter__(self):
61
+ for val in self._mapping.zmetadata.values():
62
+ yield json.dumps(val).encode()
63
+ yield from self._mapping._items.values()
64
+ for field in self._mapping.listdir():
65
+ chunk_sizes = self._mapping._get_chunk_sizes(field)
66
+ if len(chunk_sizes) == 0:
67
+ yield self._mapping[field + "/0"]
68
+ continue
69
+ yield from self._mapping._generate_all_records(field)
70
+
71
+
72
+ class RefsItemsView(collections.abc.ItemsView):
73
+ def __iter__(self):
74
+ return zip(self._mapping.keys(), self._mapping.values())
75
+
76
+
77
+ def ravel_multi_index(idx, sizes):
78
+ val = 0
79
+ mult = 1
80
+ for i, s in zip(idx[::-1], sizes[::-1]):
81
+ val += i * mult
82
+ mult *= s
83
+ return val
84
+
85
+
86
+ class LazyReferenceMapper(collections.abc.MutableMapping):
87
+ """This interface can be used to read/write references from Parquet stores.
88
+ It is not intended for other types of references.
89
+ It can be used with Kerchunk's MultiZarrToZarr method to combine
90
+ references into a parquet store.
91
+ Examples of this use-case can be found here:
92
+ https://fsspec.github.io/kerchunk/advanced.html?highlight=parquet#parquet-storage"""
93
+
94
+ # import is class level to prevent numpy dep requirement for fsspec
95
+ @property
96
+ def np(self):
97
+ import numpy as np
98
+
99
+ return np
100
+
101
+ @property
102
+ def pd(self):
103
+ import pandas as pd
104
+
105
+ return pd
106
+
107
+ def __init__(
108
+ self,
109
+ root,
110
+ fs=None,
111
+ out_root=None,
112
+ cache_size=128,
113
+ categorical_threshold=10,
114
+ engine: Literal["fastparquet", "pyarrow"] = "fastparquet",
115
+ ):
116
+ """
117
+
118
+ This instance will be writable, storing changes in memory until full partitions
119
+ are accumulated or .flush() is called.
120
+
121
+ To create an empty lazy store, use .create()
122
+
123
+ Parameters
124
+ ----------
125
+ root : str
126
+ Root of parquet store
127
+ fs : fsspec.AbstractFileSystem
128
+ fsspec filesystem object, default is local filesystem.
129
+ cache_size : int, default=128
130
+ Maximum size of LRU cache, where cache_size*record_size denotes
131
+ the total number of references that can be loaded in memory at once.
132
+ categorical_threshold : int
133
+ Encode urls as pandas.Categorical to reduce memory footprint if the ratio
134
+ of the number of unique urls to total number of refs for each variable
135
+ is greater than or equal to this number. (default 10)
136
+ engine: Literal["fastparquet","pyarrow"]
137
+ Engine choice for reading parquet files. (default is "fastparquet")
138
+ """
139
+
140
+ self.root = root
141
+ self.chunk_sizes = {}
142
+ self.out_root = out_root or self.root
143
+ self.cat_thresh = categorical_threshold
144
+ self.engine = engine
145
+ self.cache_size = cache_size
146
+ self.url = self.root + "/{field}/refs.{record}.parq"
147
+ # TODO: derive fs from `root`
148
+ self.fs = fsspec.filesystem("file") if fs is None else fs
149
+
150
+ from importlib.util import find_spec
151
+
152
+ if self.engine == "pyarrow" and find_spec("pyarrow") is None:
153
+ raise ImportError("engine choice `pyarrow` is not installed.")
154
+
155
+ def __getattr__(self, item):
156
+ if item in ("_items", "record_size", "zmetadata"):
157
+ self.setup()
158
+ # avoid possible recursion if setup fails somehow
159
+ return self.__dict__[item]
160
+ raise AttributeError(item)
161
+
162
+ def setup(self):
163
+ self._items = {}
164
+ self._items[".zmetadata"] = self.fs.cat_file(
165
+ "/".join([self.root, ".zmetadata"])
166
+ )
167
+ met = json.loads(self._items[".zmetadata"])
168
+ self.record_size = met["record_size"]
169
+ self.zmetadata = met["metadata"]
170
+
171
+ # Define function to open and decompress refs
172
+ @lru_cache(maxsize=self.cache_size)
173
+ def open_refs(field, record):
174
+ """cached parquet file loader"""
175
+ path = self.url.format(field=field, record=record)
176
+ data = io.BytesIO(self.fs.cat_file(path))
177
+ try:
178
+ df = self.pd.read_parquet(data, engine=self.engine)
179
+ refs = {c: df[c].to_numpy() for c in df.columns}
180
+ except OSError:
181
+ refs = None
182
+ return refs
183
+
184
+ self.open_refs = open_refs
185
+
186
+ @staticmethod
187
+ def create(root, storage_options=None, fs=None, record_size=10000, **kwargs):
188
+ """Make empty parquet reference set
189
+
190
+ First deletes the contents of the given directory, if it exists.
191
+
192
+ Parameters
193
+ ----------
194
+ root: str
195
+ Directory to contain the output; will be created
196
+ storage_options: dict | None
197
+ For making the filesystem to use for writing is fs is None
198
+ fs: FileSystem | None
199
+ Filesystem for writing
200
+ record_size: int
201
+ Number of references per parquet file
202
+ kwargs: passed to __init__
203
+
204
+ Returns
205
+ -------
206
+ LazyReferenceMapper instance
207
+ """
208
+ met = {"metadata": {}, "record_size": record_size}
209
+ if fs is None:
210
+ fs, root = fsspec.core.url_to_fs(root, **(storage_options or {}))
211
+ if fs.exists(root):
212
+ fs.rm(root, recursive=True)
213
+ fs.makedirs(root, exist_ok=True)
214
+ fs.pipe("/".join([root, ".zmetadata"]), json.dumps(met).encode())
215
+ return LazyReferenceMapper(root, fs, **kwargs)
216
+
217
+ @lru_cache()
218
+ def listdir(self):
219
+ """List top-level directories"""
220
+ dirs = (p.rsplit("/", 1)[0] for p in self.zmetadata if not p.startswith(".z"))
221
+ return set(dirs)
222
+
223
+ def ls(self, path="", detail=True):
224
+ """Shortcut file listings"""
225
+ path = path.rstrip("/")
226
+ pathdash = path + "/" if path else ""
227
+ dirnames = self.listdir()
228
+ dirs = [
229
+ d
230
+ for d in dirnames
231
+ if d.startswith(pathdash) and "/" not in d.lstrip(pathdash)
232
+ ]
233
+ if dirs:
234
+ others = {
235
+ f
236
+ for f in chain(
237
+ [".zmetadata"],
238
+ (name for name in self.zmetadata),
239
+ (name for name in self._items),
240
+ )
241
+ if f.startswith(pathdash) and "/" not in f.lstrip(pathdash)
242
+ }
243
+ if detail is False:
244
+ others.update(dirs)
245
+ return sorted(others)
246
+ dirinfo = [{"name": name, "type": "directory", "size": 0} for name in dirs]
247
+ fileinfo = [
248
+ {
249
+ "name": name,
250
+ "type": "file",
251
+ "size": len(
252
+ json.dumps(self.zmetadata[name])
253
+ if name in self.zmetadata
254
+ else self._items[name]
255
+ ),
256
+ }
257
+ for name in others
258
+ ]
259
+ return sorted(dirinfo + fileinfo, key=lambda s: s["name"])
260
+ field = path
261
+ others = set(
262
+ [name for name in self.zmetadata if name.startswith(f"{path}/")]
263
+ + [name for name in self._items if name.startswith(f"{path}/")]
264
+ )
265
+ fileinfo = [
266
+ {
267
+ "name": name,
268
+ "type": "file",
269
+ "size": len(
270
+ json.dumps(self.zmetadata[name])
271
+ if name in self.zmetadata
272
+ else self._items[name]
273
+ ),
274
+ }
275
+ for name in others
276
+ ]
277
+ keys = self._keys_in_field(field)
278
+
279
+ if detail is False:
280
+ return list(others) + list(keys)
281
+ recs = self._generate_all_records(field)
282
+ recinfo = [
283
+ {"name": name, "type": "file", "size": rec[-1]}
284
+ for name, rec in zip(keys, recs)
285
+ if rec[0] # filters out path==None, deleted/missing
286
+ ]
287
+ return fileinfo + recinfo
288
+
289
+ def _load_one_key(self, key):
290
+ """Get the reference for one key
291
+
292
+ Returns bytes, one-element list or three-element list.
293
+ """
294
+ if key in self._items:
295
+ return self._items[key]
296
+ elif key in self.zmetadata:
297
+ return json.dumps(self.zmetadata[key]).encode()
298
+ elif "/" not in key or self._is_meta(key):
299
+ raise KeyError(key)
300
+ field, _ = key.rsplit("/", 1)
301
+ record, ri, chunk_size = self._key_to_record(key)
302
+ maybe = self._items.get((field, record), {}).get(ri, False)
303
+ if maybe is None:
304
+ # explicitly deleted
305
+ raise KeyError
306
+ elif maybe:
307
+ return maybe
308
+ elif chunk_size == 0:
309
+ return b""
310
+
311
+ # Chunk keys can be loaded from row group and cached in LRU cache
312
+ try:
313
+ refs = self.open_refs(field, record)
314
+ except (ValueError, TypeError, FileNotFoundError) as exc:
315
+ raise KeyError(key) from exc
316
+ columns = ["path", "offset", "size", "raw"]
317
+ selection = [refs[c][ri] if c in refs else None for c in columns]
318
+ raw = selection[-1]
319
+ if raw is not None:
320
+ return raw
321
+ if selection[0] is None:
322
+ raise KeyError("This reference does not exist or has been deleted")
323
+ if selection[1:3] == [0, 0]:
324
+ # URL only
325
+ return selection[:1]
326
+ # URL, offset, size
327
+ return selection[:3]
328
+
329
+ @lru_cache(4096)
330
+ def _key_to_record(self, key):
331
+ """Details needed to construct a reference for one key"""
332
+ field, chunk = key.rsplit("/", 1)
333
+ chunk_sizes = self._get_chunk_sizes(field)
334
+ if len(chunk_sizes) == 0:
335
+ return 0, 0, 0
336
+ chunk_idx = [int(c) for c in chunk.split(".")]
337
+ chunk_number = ravel_multi_index(chunk_idx, chunk_sizes)
338
+ record = chunk_number // self.record_size
339
+ ri = chunk_number % self.record_size
340
+ return record, ri, len(chunk_sizes)
341
+
342
+ def _get_chunk_sizes(self, field):
343
+ """The number of chunks along each axis for a given field"""
344
+ if field not in self.chunk_sizes:
345
+ zarray = self.zmetadata[f"{field}/.zarray"]
346
+ size_ratio = [
347
+ math.ceil(s / c) for s, c in zip(zarray["shape"], zarray["chunks"])
348
+ ]
349
+ self.chunk_sizes[field] = size_ratio or [1]
350
+ return self.chunk_sizes[field]
351
+
352
+ def _generate_record(self, field, record):
353
+ """The references for a given parquet file of a given field"""
354
+ refs = self.open_refs(field, record)
355
+ it = iter(zip(*refs.values()))
356
+ if len(refs) == 3:
357
+ # All urls
358
+ return (list(t) for t in it)
359
+ elif len(refs) == 1:
360
+ # All raws
361
+ return refs["raw"]
362
+ else:
363
+ # Mix of urls and raws
364
+ return (list(t[:3]) if not t[3] else t[3] for t in it)
365
+
366
+ def _generate_all_records(self, field):
367
+ """Load all the references within a field by iterating over the parquet files"""
368
+ nrec = 1
369
+ for ch in self._get_chunk_sizes(field):
370
+ nrec *= ch
371
+ nrec = math.ceil(nrec / self.record_size)
372
+ for record in range(nrec):
373
+ yield from self._generate_record(field, record)
374
+
375
+ def values(self):
376
+ return RefsValuesView(self)
377
+
378
+ def items(self):
379
+ return RefsItemsView(self)
380
+
381
+ def __hash__(self):
382
+ return id(self)
383
+
384
+ def __getitem__(self, key):
385
+ return self._load_one_key(key)
386
+
387
+ def __setitem__(self, key, value):
388
+ if "/" in key and not self._is_meta(key):
389
+ field, chunk = key.rsplit("/", 1)
390
+ record, i, _ = self._key_to_record(key)
391
+ subdict = self._items.setdefault((field, record), {})
392
+ subdict[i] = value
393
+ if len(subdict) == self.record_size:
394
+ self.write(field, record)
395
+ else:
396
+ # metadata or top-level
397
+ self._items[key] = value
398
+ new_value = json.loads(
399
+ value.decode() if isinstance(value, bytes) else value
400
+ )
401
+ self.zmetadata[key] = {**self.zmetadata.get(key, {}), **new_value}
402
+
403
+ @staticmethod
404
+ def _is_meta(key):
405
+ return key.startswith(".z") or "/.z" in key
406
+
407
+ def __delitem__(self, key):
408
+ if key in self._items:
409
+ del self._items[key]
410
+ elif key in self.zmetadata:
411
+ del self.zmetadata[key]
412
+ else:
413
+ if "/" in key and not self._is_meta(key):
414
+ field, _ = key.rsplit("/", 1)
415
+ record, i, _ = self._key_to_record(key)
416
+ subdict = self._items.setdefault((field, record), {})
417
+ subdict[i] = None
418
+ if len(subdict) == self.record_size:
419
+ self.write(field, record)
420
+ else:
421
+ # metadata or top-level
422
+ self._items[key] = None
423
+
424
+ def write(self, field, record, base_url=None, storage_options=None):
425
+ # extra requirements if writing
426
+ import kerchunk.df
427
+ import numpy as np
428
+ import pandas as pd
429
+
430
+ partition = self._items[(field, record)]
431
+ original = False
432
+ if len(partition) < self.record_size:
433
+ try:
434
+ original = self.open_refs(field, record)
435
+ except OSError:
436
+ pass
437
+
438
+ if original:
439
+ paths = original["path"]
440
+ offsets = original["offset"]
441
+ sizes = original["size"]
442
+ raws = original["raw"]
443
+ else:
444
+ paths = np.full(self.record_size, np.nan, dtype="O")
445
+ offsets = np.zeros(self.record_size, dtype="int64")
446
+ sizes = np.zeros(self.record_size, dtype="int64")
447
+ raws = np.full(self.record_size, np.nan, dtype="O")
448
+ for j, data in partition.items():
449
+ if isinstance(data, list):
450
+ if (
451
+ str(paths.dtype) == "category"
452
+ and data[0] not in paths.dtype.categories
453
+ ):
454
+ paths = paths.add_categories(data[0])
455
+ paths[j] = data[0]
456
+ if len(data) > 1:
457
+ offsets[j] = data[1]
458
+ sizes[j] = data[2]
459
+ elif data is None:
460
+ # delete
461
+ paths[j] = None
462
+ offsets[j] = 0
463
+ sizes[j] = 0
464
+ raws[j] = None
465
+ else:
466
+ # this is the only call into kerchunk, could remove
467
+ raws[j] = kerchunk.df._proc_raw(data)
468
+ # TODO: only save needed columns
469
+ df = pd.DataFrame(
470
+ {
471
+ "path": paths,
472
+ "offset": offsets,
473
+ "size": sizes,
474
+ "raw": raws,
475
+ },
476
+ copy=False,
477
+ )
478
+ if df.path.count() / (df.path.nunique() or 1) > self.cat_thresh:
479
+ df["path"] = df["path"].astype("category")
480
+ object_encoding = {"raw": "bytes", "path": "utf8"}
481
+ has_nulls = ["path", "raw"]
482
+
483
+ fn = f"{base_url or self.out_root}/{field}/refs.{record}.parq"
484
+ self.fs.mkdirs(f"{base_url or self.out_root}/{field}", exist_ok=True)
485
+
486
+ if self.engine == "pyarrow":
487
+ df_backend_kwargs = {"write_statistics": False}
488
+ elif self.engine == "fastparquet":
489
+ df_backend_kwargs = {
490
+ "stats": False,
491
+ "object_encoding": object_encoding,
492
+ "has_nulls": has_nulls,
493
+ }
494
+ else:
495
+ raise NotImplementedError(f"{self.engine} not supported")
496
+
497
+ df.to_parquet(
498
+ fn,
499
+ engine=self.engine,
500
+ storage_options=storage_options
501
+ or getattr(self.fs, "storage_options", None),
502
+ compression="zstd",
503
+ index=False,
504
+ **df_backend_kwargs,
505
+ )
506
+
507
+ partition.clear()
508
+ self._items.pop((field, record))
509
+
510
+ def flush(self, base_url=None, storage_options=None):
511
+ """Output any modified or deleted keys
512
+
513
+ Parameters
514
+ ----------
515
+ base_url: str
516
+ Location of the output
517
+ """
518
+
519
+ # write what we have so far and clear sub chunks
520
+ for thing in list(self._items):
521
+ if isinstance(thing, tuple):
522
+ field, record = thing
523
+ self.write(
524
+ field,
525
+ record,
526
+ base_url=base_url,
527
+ storage_options=storage_options,
528
+ )
529
+
530
+ # gather .zmetadata from self._items and write that too
531
+ for k in list(self._items):
532
+ if k != ".zmetadata" and ".z" in k:
533
+ self.zmetadata[k] = json.loads(self._items.pop(k))
534
+ met = {"metadata": self.zmetadata, "record_size": self.record_size}
535
+ self._items.clear()
536
+ self._items[".zmetadata"] = json.dumps(met).encode()
537
+ self.fs.pipe(
538
+ "/".join([base_url or self.out_root, ".zmetadata"]),
539
+ self._items[".zmetadata"],
540
+ )
541
+
542
+ # TODO: only clear those that we wrote to?
543
+ self.open_refs.cache_clear()
544
+
545
+ def __len__(self):
546
+ # Caveat: This counts expected references, not actual - but is fast
547
+ count = 0
548
+ for field in self.listdir():
549
+ if field.startswith("."):
550
+ count += 1
551
+ else:
552
+ count += math.prod(self._get_chunk_sizes(field))
553
+ count += len(self.zmetadata) # all metadata keys
554
+ # any other files not in reference partitions
555
+ count += sum(1 for _ in self._items if not isinstance(_, tuple))
556
+ return count
557
+
558
+ def __iter__(self):
559
+ # Caveat: returns only existing keys, so the number of these does not
560
+ # match len(self)
561
+ metas = set(self.zmetadata)
562
+ metas.update(self._items)
563
+ for bit in metas:
564
+ if isinstance(bit, str):
565
+ yield bit
566
+ for field in self.listdir():
567
+ for k in self._keys_in_field(field):
568
+ if k in self:
569
+ yield k
570
+
571
+ def __contains__(self, item):
572
+ try:
573
+ self._load_one_key(item)
574
+ return True
575
+ except KeyError:
576
+ return False
577
+
578
+ def _keys_in_field(self, field):
579
+ """List key names in given field
580
+
581
+ Produces strings like "field/x.y" appropriate from the chunking of the array
582
+ """
583
+ chunk_sizes = self._get_chunk_sizes(field)
584
+ if len(chunk_sizes) == 0:
585
+ yield field + "/0"
586
+ return
587
+ inds = itertools.product(*(range(i) for i in chunk_sizes))
588
+ for ind in inds:
589
+ yield field + "/" + ".".join([str(c) for c in ind])
590
+
591
+
592
+ class ReferenceFileSystem(AsyncFileSystem):
593
+ """View byte ranges of some other file as a file system
594
+ Initial version: single file system target, which must support
595
+ async, and must allow start and end args in _cat_file. Later versions
596
+ may allow multiple arbitrary URLs for the targets.
597
+ This FileSystem is read-only. It is designed to be used with async
598
+ targets (for now). This FileSystem only allows whole-file access, no
599
+ ``open``. We do not get original file details from the target FS.
600
+ Configuration is by passing a dict of references at init, or a URL to
601
+ a JSON file containing the same; this dict
602
+ can also contain concrete data for some set of paths.
603
+ Reference dict format:
604
+ {path0: bytes_data, path1: (target_url, offset, size)}
605
+ https://github.com/fsspec/kerchunk/blob/main/README.md
606
+ """
607
+
608
+ protocol = "reference"
609
+
610
+ def __init__(
611
+ self,
612
+ fo,
613
+ target=None,
614
+ ref_storage_args=None,
615
+ target_protocol=None,
616
+ target_options=None,
617
+ remote_protocol=None,
618
+ remote_options=None,
619
+ fs=None,
620
+ template_overrides=None,
621
+ simple_templates=True,
622
+ max_gap=64_000,
623
+ max_block=256_000_000,
624
+ cache_size=128,
625
+ **kwargs,
626
+ ):
627
+ """
628
+ Parameters
629
+ ----------
630
+ fo : dict or str
631
+ The set of references to use for this instance, with a structure as above.
632
+ If str referencing a JSON file, will use fsspec.open, in conjunction
633
+ with target_options and target_protocol to open and parse JSON at this
634
+ location. If a directory, then assume references are a set of parquet
635
+ files to be loaded lazily.
636
+ target : str
637
+ For any references having target_url as None, this is the default file
638
+ target to use
639
+ ref_storage_args : dict
640
+ If references is a str, use these kwargs for loading the JSON file.
641
+ Deprecated: use target_options instead.
642
+ target_protocol : str
643
+ Used for loading the reference file, if it is a path. If None, protocol
644
+ will be derived from the given path
645
+ target_options : dict
646
+ Extra FS options for loading the reference file ``fo``, if given as a path
647
+ remote_protocol : str
648
+ The protocol of the filesystem on which the references will be evaluated
649
+ (unless fs is provided). If not given, will be derived from the first
650
+ URL that has a protocol in the templates or in the references, in that
651
+ order.
652
+ remote_options : dict
653
+ kwargs to go with remote_protocol
654
+ fs : AbstractFileSystem | dict(str, (AbstractFileSystem | dict))
655
+ Directly provide a file system(s):
656
+ - a single filesystem instance
657
+ - a dict of protocol:filesystem, where each value is either a filesystem
658
+ instance, or a dict of kwargs that can be used to create in
659
+ instance for the given protocol
660
+
661
+ If this is given, remote_options and remote_protocol are ignored.
662
+ template_overrides : dict
663
+ Swap out any templates in the references file with these - useful for
664
+ testing.
665
+ simple_templates: bool
666
+ Whether templates can be processed with simple replace (True) or if
667
+ jinja is needed (False, much slower). All reference sets produced by
668
+ ``kerchunk`` are simple in this sense, but the spec allows for complex.
669
+ max_gap, max_block: int
670
+ For merging multiple concurrent requests to the same remote file.
671
+ Neighboring byte ranges will only be merged when their
672
+ inter-range gap is <= ``max_gap``. Default is 64KB. Set to 0
673
+ to only merge when it requires no extra bytes. Pass a negative
674
+ number to disable merging, appropriate for local target files.
675
+ Neighboring byte ranges will only be merged when the size of
676
+ the aggregated range is <= ``max_block``. Default is 256MB.
677
+ cache_size : int
678
+ Maximum size of LRU cache, where cache_size*record_size denotes
679
+ the total number of references that can be loaded in memory at once.
680
+ Only used for lazily loaded references.
681
+ kwargs : passed to parent class
682
+ """
683
+ super().__init__(**kwargs)
684
+ self.target = target
685
+ self.template_overrides = template_overrides
686
+ self.simple_templates = simple_templates
687
+ self.templates = {}
688
+ self.fss = {}
689
+ self._dircache = {}
690
+ self.max_gap = max_gap
691
+ self.max_block = max_block
692
+ if isinstance(fo, str):
693
+ dic = dict(
694
+ **(ref_storage_args or target_options or {}), protocol=target_protocol
695
+ )
696
+ ref_fs, fo2 = fsspec.core.url_to_fs(fo, **dic)
697
+ if ref_fs.isfile(fo2):
698
+ # text JSON
699
+ with fsspec.open(fo, "rb", **dic) as f:
700
+ logger.info("Read reference from URL %s", fo)
701
+ text = json.load(f)
702
+ self._process_references(text, template_overrides)
703
+ else:
704
+ # Lazy parquet refs
705
+ logger.info("Open lazy reference dict from URL %s", fo)
706
+ self.references = LazyReferenceMapper(
707
+ fo2,
708
+ fs=ref_fs,
709
+ cache_size=cache_size,
710
+ )
711
+ else:
712
+ # dictionaries
713
+ self._process_references(fo, template_overrides)
714
+ if isinstance(fs, dict):
715
+ self.fss = {
716
+ k: (
717
+ fsspec.filesystem(k.split(":", 1)[0], **opts)
718
+ if isinstance(opts, dict)
719
+ else opts
720
+ )
721
+ for k, opts in fs.items()
722
+ }
723
+ if None not in self.fss:
724
+ self.fss[None] = filesystem("file")
725
+ return
726
+ if fs is not None:
727
+ # single remote FS
728
+ remote_protocol = (
729
+ fs.protocol[0] if isinstance(fs.protocol, tuple) else fs.protocol
730
+ )
731
+ self.fss[remote_protocol] = fs
732
+
733
+ if remote_protocol is None:
734
+ # get single protocol from any templates
735
+ for ref in self.templates.values():
736
+ if callable(ref):
737
+ ref = ref()
738
+ protocol, _ = fsspec.core.split_protocol(ref)
739
+ if protocol and protocol not in self.fss:
740
+ fs = filesystem(protocol, **(remote_options or {}))
741
+ self.fss[protocol] = fs
742
+ if remote_protocol is None:
743
+ # get single protocol from references
744
+ # TODO: warning here, since this can be very expensive?
745
+ for ref in self.references.values():
746
+ if callable(ref):
747
+ ref = ref()
748
+ if isinstance(ref, list) and ref[0]:
749
+ protocol, _ = fsspec.core.split_protocol(ref[0])
750
+ if protocol not in self.fss:
751
+ fs = filesystem(protocol, **(remote_options or {}))
752
+ self.fss[protocol] = fs
753
+ # only use first remote URL
754
+ break
755
+
756
+ if remote_protocol and remote_protocol not in self.fss:
757
+ fs = filesystem(remote_protocol, **(remote_options or {}))
758
+ self.fss[remote_protocol] = fs
759
+
760
+ self.fss[None] = fs or filesystem("file") # default one
761
+ # Wrap any non-async filesystems to ensure async methods are available below
762
+ for k, f in self.fss.items():
763
+ if not f.async_impl:
764
+ self.fss[k] = AsyncFileSystemWrapper(f)
765
+
766
+ def _cat_common(self, path, start=None, end=None):
767
+ path = self._strip_protocol(path)
768
+ logger.debug(f"cat: {path}")
769
+ try:
770
+ part = self.references[path]
771
+ except KeyError as exc:
772
+ raise FileNotFoundError(path) from exc
773
+ if isinstance(part, str):
774
+ part = part.encode()
775
+ if isinstance(part, bytes):
776
+ logger.debug(f"Reference: {path}, type bytes")
777
+ if part.startswith(b"base64:"):
778
+ part = base64.b64decode(part[7:])
779
+ return part, None, None
780
+
781
+ if len(part) == 1:
782
+ logger.debug(f"Reference: {path}, whole file => {part}")
783
+ url = part[0]
784
+ start1, end1 = start, end
785
+ else:
786
+ url, start0, size = part
787
+ logger.debug(f"Reference: {path} => {url}, offset {start0}, size {size}")
788
+ end0 = start0 + size
789
+
790
+ if start is not None:
791
+ if start >= 0:
792
+ start1 = start0 + start
793
+ else:
794
+ start1 = end0 + start
795
+ else:
796
+ start1 = start0
797
+ if end is not None:
798
+ if end >= 0:
799
+ end1 = start0 + end
800
+ else:
801
+ end1 = end0 + end
802
+ else:
803
+ end1 = end0
804
+ if url is None:
805
+ url = self.target
806
+ return url, start1, end1
807
+
808
+ async def _cat_file(self, path, start=None, end=None, **kwargs):
809
+ part_or_url, start0, end0 = self._cat_common(path, start=start, end=end)
810
+ if isinstance(part_or_url, bytes):
811
+ return part_or_url[start:end]
812
+ protocol, _ = split_protocol(part_or_url)
813
+ try:
814
+ return await self.fss[protocol]._cat_file(
815
+ part_or_url, start=start0, end=end0
816
+ )
817
+ except Exception as e:
818
+ raise ReferenceNotReachable(path, part_or_url) from e
819
+
820
+ def cat_file(self, path, start=None, end=None, **kwargs):
821
+ part_or_url, start0, end0 = self._cat_common(path, start=start, end=end)
822
+ if isinstance(part_or_url, bytes):
823
+ return part_or_url[start:end]
824
+ protocol, _ = split_protocol(part_or_url)
825
+ try:
826
+ return self.fss[protocol].cat_file(part_or_url, start=start0, end=end0)
827
+ except Exception as e:
828
+ raise ReferenceNotReachable(path, part_or_url) from e
829
+
830
+ def pipe_file(self, path, value, **_):
831
+ """Temporarily add binary data or reference as a file"""
832
+ self.references[path] = value
833
+
834
+ async def _get_file(self, rpath, lpath, **kwargs):
835
+ if self.isdir(rpath):
836
+ return os.makedirs(lpath, exist_ok=True)
837
+ data = await self._cat_file(rpath)
838
+ with open(lpath, "wb") as f:
839
+ f.write(data)
840
+
841
+ def get_file(self, rpath, lpath, callback=DEFAULT_CALLBACK, **kwargs):
842
+ if self.isdir(rpath):
843
+ return os.makedirs(lpath, exist_ok=True)
844
+ data = self.cat_file(rpath, **kwargs)
845
+ callback.set_size(len(data))
846
+ if isfilelike(lpath):
847
+ lpath.write(data)
848
+ else:
849
+ with open(lpath, "wb") as f:
850
+ f.write(data)
851
+ callback.absolute_update(len(data))
852
+
853
+ def get(self, rpath, lpath, recursive=False, **kwargs):
854
+ if recursive:
855
+ # trigger directory build
856
+ self.ls("")
857
+ rpath = self.expand_path(rpath, recursive=recursive)
858
+ fs = fsspec.filesystem("file", auto_mkdir=True)
859
+ targets = other_paths(rpath, lpath)
860
+ if recursive:
861
+ data = self.cat([r for r in rpath if not self.isdir(r)])
862
+ else:
863
+ data = self.cat(rpath)
864
+ for remote, local in zip(rpath, targets):
865
+ if remote in data:
866
+ fs.pipe_file(local, data[remote])
867
+
868
    def cat(self, path, recursive=False, on_error="raise", **kwargs):
        """Fetch the contents of one or many reference paths.

        Paths are grouped by target protocol, and nearby byte ranges are
        merged (bounded by ``self.max_gap``/``self.max_block``) so that each
        backend sees few, large range requests.  Inline (bytes) references
        are answered directly.  ``on_error`` controls failure handling:
        "raise" re-raises, "omit" drops the path, anything else stores the
        exception in the result dict.

        Returns bytes for a single concrete path, else dict path -> bytes.
        """
        if isinstance(path, str) and recursive:
            raise NotImplementedError
        if isinstance(path, list) and (recursive or any("*" in p for p in path)):
            raise NotImplementedError
        # TODO: if references is lazy, pre-fetch all paths in batch before access
        proto_dict = _protocol_groups(path, self.references)
        out = {}
        for proto, paths in proto_dict.items():
            fs = self.fss[proto]
            urls, starts, ends, valid_paths = [], [], [], []
            for p in paths:
                # find references or label not-found. Early exit if any not
                # found and on_error is "raise"
                try:
                    u, s, e = self._cat_common(p)
                    if not isinstance(u, (bytes, str)):
                        # nan/None from parquet
                        continue
                except FileNotFoundError as err:
                    if on_error == "raise":
                        raise
                    if on_error != "omit":
                        out[p] = err
                else:
                    urls.append(u)
                    starts.append(s)
                    ends.append(e)
                    valid_paths.append(p)

            # process references into form for merging
            urls2 = []
            starts2 = []
            ends2 = []
            paths2 = []
            whole_files = set()
            for u, s, e, p in zip(urls, starts, ends, valid_paths):
                if isinstance(u, bytes):
                    # data
                    out[p] = u
                elif s is None:
                    # whole file - limits are None, None, but no further
                    # entries take for this file
                    whole_files.add(u)
                    urls2.append(u)
                    starts2.append(s)
                    ends2.append(e)
                    paths2.append(p)
            for u, s, e, p in zip(urls, starts, ends, valid_paths):
                # second run to account for files that are to be loaded whole
                if s is not None and u not in whole_files:
                    urls2.append(u)
                    starts2.append(s)
                    ends2.append(e)
                    paths2.append(p)

            # merge and fetch consolidated ranges
            new_paths, new_starts, new_ends = merge_offset_ranges(
                list(urls2),
                list(starts2),
                list(ends2),
                sort=True,
                max_gap=self.max_gap,
                max_block=self.max_block,
            )
            bytes_out = fs.cat_ranges(new_paths, new_starts, new_ends)

            # unbundle from merged bytes - simple approach
            # NOTE: O(requests x merged-ranges) linear scan per request
            for u, s, e, p in zip(urls, starts, ends, valid_paths):
                if p in out:
                    continue  # was bytes, already handled
                for np, ns, ne, b in zip(new_paths, new_starts, new_ends, bytes_out):
                    if np == u and (ns is None or ne is None):
                        # whole-file fetch: slice this reference's range out
                        if isinstance(b, Exception):
                            out[p] = b
                        else:
                            out[p] = b[s:e]
                    elif np == u and s >= ns and e <= ne:
                        # contained in merged range [ns, ne): (e - ne) <= 0
                        # acts as a from-the-end bound, or None when e == ne
                        if isinstance(b, Exception):
                            out[p] = b
                        else:
                            out[p] = b[s - ns : (e - ne) or None]

        for k, v in out.copy().items():
            # these were valid references, but fetch failed, so transform exc
            if isinstance(v, Exception) and k in self.references:
                ex = out[k]
                new_ex = ReferenceNotReachable(k, self.references[k])
                new_ex.__cause__ = ex
                if on_error == "raise":
                    raise new_ex
                elif on_error != "omit":
                    out[k] = new_ex

        if len(out) == 1 and isinstance(path, str) and "*" not in path:
            return _first(out)
        return out
965
+
966
+ def _process_references(self, references, template_overrides=None):
967
+ vers = references.get("version", None)
968
+ if vers is None:
969
+ self._process_references0(references)
970
+ elif vers == 1:
971
+ self._process_references1(references, template_overrides=template_overrides)
972
+ else:
973
+ raise ValueError(f"Unknown reference spec version: {vers}")
974
+ # TODO: we make dircache by iterating over all entries, but for Spec >= 1,
975
+ # can replace with programmatic. Is it even needed for mapper interface?
976
+
977
+ def _process_references0(self, references):
978
+ """Make reference dict for Spec Version 0"""
979
+ if isinstance(references, dict):
980
+ # do not do this for lazy/parquet backend, which will not make dicts,
981
+ # but must remain writable in the original object
982
+ references = {
983
+ key: json.dumps(val) if isinstance(val, dict) else val
984
+ for key, val in references.items()
985
+ }
986
+ self.references = references
987
+
988
+ def _process_references1(self, references, template_overrides=None):
989
+ if not self.simple_templates or self.templates:
990
+ import jinja2
991
+ self.references = {}
992
+ self._process_templates(references.get("templates", {}))
993
+
994
+ @lru_cache(1000)
995
+ def _render_jinja(u):
996
+ return jinja2.Template(u).render(**self.templates)
997
+
998
+ for k, v in references.get("refs", {}).items():
999
+ if isinstance(v, str):
1000
+ if v.startswith("base64:"):
1001
+ self.references[k] = base64.b64decode(v[7:])
1002
+ self.references[k] = v
1003
+ elif isinstance(v, dict):
1004
+ self.references[k] = json.dumps(v)
1005
+ elif self.templates:
1006
+ u = v[0]
1007
+ if "{{" in u:
1008
+ if self.simple_templates:
1009
+ u = (
1010
+ u.replace("{{", "{")
1011
+ .replace("}}", "}")
1012
+ .format(**self.templates)
1013
+ )
1014
+ else:
1015
+ u = _render_jinja(u)
1016
+ self.references[k] = [u] if len(v) == 1 else [u, v[1], v[2]]
1017
+ else:
1018
+ self.references[k] = v
1019
+ self.references.update(self._process_gen(references.get("gen", [])))
1020
+
1021
+ def _process_templates(self, tmp):
1022
+ self.templates = {}
1023
+ if self.template_overrides is not None:
1024
+ tmp.update(self.template_overrides)
1025
+ for k, v in tmp.items():
1026
+ if "{{" in v:
1027
+ import jinja2
1028
+
1029
+ self.templates[k] = lambda temp=v, **kwargs: jinja2.Template(
1030
+ temp
1031
+ ).render(**kwargs)
1032
+ else:
1033
+ self.templates[k] = v
1034
+
1035
+ def _process_gen(self, gens):
1036
+ out = {}
1037
+ for gen in gens:
1038
+ dimension = {
1039
+ k: (
1040
+ v
1041
+ if isinstance(v, list)
1042
+ else range(v.get("start", 0), v["stop"], v.get("step", 1))
1043
+ )
1044
+ for k, v in gen["dimensions"].items()
1045
+ }
1046
+ products = (
1047
+ dict(zip(dimension.keys(), values))
1048
+ for values in itertools.product(*dimension.values())
1049
+ )
1050
+ for pr in products:
1051
+ import jinja2
1052
+
1053
+ key = jinja2.Template(gen["key"]).render(**pr, **self.templates)
1054
+ url = jinja2.Template(gen["url"]).render(**pr, **self.templates)
1055
+ if ("offset" in gen) and ("length" in gen):
1056
+ offset = int(
1057
+ jinja2.Template(gen["offset"]).render(**pr, **self.templates)
1058
+ )
1059
+ length = int(
1060
+ jinja2.Template(gen["length"]).render(**pr, **self.templates)
1061
+ )
1062
+ out[key] = [url, offset, length]
1063
+ elif ("offset" in gen) ^ ("length" in gen):
1064
+ raise ValueError(
1065
+ "Both 'offset' and 'length' are required for a "
1066
+ "reference generator entry if either is provided."
1067
+ )
1068
+ else:
1069
+ out[key] = [url]
1070
+ return out
1071
+
1072
    def _dircache_from_items(self):
        """Build the directory-listing cache from the flat reference dict.

        Walks every reference key, deriving file sizes (length of inline
        data, the explicit length of ranged references, or None for
        whole-file references whose size is unknown here) and registering
        every intermediate parent directory exactly once.
        """
        self.dircache = {"": []}
        it = self.references.items()
        for path, part in it:
            if isinstance(part, (bytes, str)):
                # inline data: size is its own length
                size = len(part)
            elif len(part) == 1:
                # [url] whole-file reference: size unknown until queried
                size = None
            else:
                # [url, offset, length]
                _, _, size = part
            par = path.rsplit("/", 1)[0] if "/" in path else ""
            par0 = par
            subdirs = [par0]
            while par0 and par0 not in self.dircache:
                # collect parent directories
                par0 = self._parent(par0)
                subdirs.append(par0)

            # walk root-most first so each parent exists before its child
            subdirs.reverse()
            for parent, child in zip(subdirs, subdirs[1:]):
                # register newly discovered directories
                assert child not in self.dircache
                assert parent in self.dircache
                self.dircache[parent].append(
                    {"name": child, "type": "directory", "size": 0}
                )
                self.dircache[child] = []

            self.dircache[par].append({"name": path, "type": "file", "size": size})
1101
+
1102
    def _open(self, path, mode="rb", block_size=None, cache_options=None, **kwargs):
        """Open a reference as a file-like object.

        Reference chunks are expected to be small, so the content is loaded
        fully into memory and wrapped in a BytesIO rather than proxying a
        remote file handle.
        """
        data = self.cat_file(path)  # load whole chunk into memory
        return io.BytesIO(data)
1105
+
1106
    def ls(self, path, detail=True, **kwargs):
        """List a directory (or single file) within the reference set.

        Lazy (parquet-backed) reference sets answer from their own index;
        otherwise the dircache is built once from all items and consulted.

        Raises
        ------
        FileNotFoundError
            If *path* is not a known key or directory.
        """
        path = self._strip_protocol(path)
        if isinstance(self.references, LazyReferenceMapper):
            try:
                return self.references.ls(path, detail)
            except KeyError:
                pass
            raise FileNotFoundError(f"'{path}' is not a known key")
        if not self.dircache:
            self._dircache_from_items()
        out = self._ls_from_cache(path)
        if out is None:
            raise FileNotFoundError(path)
        if detail:
            return out
        return [o["name"] for o in out]
1122
+
1123
+ def exists(self, path, **kwargs): # overwrite auto-sync version
1124
+ return self.isdir(path) or self.isfile(path)
1125
+
1126
    def isdir(self, path):  # overwrite auto-sync version
        """A path counts as a directory if any reference key lives under it."""
        if self.dircache:
            return path in self.dircache
        elif isinstance(self.references, LazyReferenceMapper):
            return path in self.references.listdir()
        else:
            # this may be faster than building dircache for single calls, but
            # by looping will be slow for many calls; could cache it?
            return any(_.startswith(f"{path}/") for _ in self.references)
1135
+
1136
    def isfile(self, path):  # overwrite auto-sync version
        # files are exactly the keys of the reference mapping
        return path in self.references
1138
+
1139
    async def _ls(self, path, detail=True, **kwargs):  # calls fast sync code
        # listing is pure in-memory work, so delegate to the sync version
        return self.ls(path, detail, **kwargs)
1141
+
1142
    def find(self, path, maxdepth=None, withdirs=False, detail=False, **kwargs):
        """Enumerate all file keys under *path*, sorted.

        The flat reference dict makes this a simple prefix scan; only the
        ``withdirs`` case needs the generic (slower) traversal of the parent
        class.  ``detail=True`` returns info dicts taken from the dircache.
        """
        if withdirs:
            return super().find(
                path, maxdepth=maxdepth, withdirs=withdirs, detail=detail, **kwargs
            )
        if path:
            path = self._strip_protocol(path)
            r = sorted(k for k in self.references if k.startswith(path))
        else:
            # empty path means "everything"
            r = sorted(self.references)
        if detail:
            if not self.dircache:
                self._dircache_from_items()
            return {k: self._ls_from_cache(k)[0] for k in r}
        else:
            return r
1158
+
1159
+ def info(self, path, **kwargs):
1160
+ out = self.references.get(path)
1161
+ if out is not None:
1162
+ if isinstance(out, (str, bytes)):
1163
+ # decode base64 here
1164
+ return {"name": path, "type": "file", "size": len(out)}
1165
+ elif len(out) > 1:
1166
+ return {"name": path, "type": "file", "size": out[2]}
1167
+ else:
1168
+ out0 = [{"name": path, "type": "file", "size": None}]
1169
+ else:
1170
+ out = self.ls(path, True)
1171
+ out0 = [o for o in out if o["name"] == path]
1172
+ if not out0:
1173
+ return {"name": path, "type": "directory", "size": 0}
1174
+ if out0[0]["size"] is None:
1175
+ # if this is a whole remote file, update size using remote FS
1176
+ prot, _ = split_protocol(self.references[path][0])
1177
+ out0[0]["size"] = self.fss[prot].size(self.references[path][0])
1178
+ return out0[0]
1179
+
1180
    async def _info(self, path, **kwargs):  # calls fast sync code
        # info is answered from in-memory structures; no real async work
        return self.info(path)
1182
+
1183
    async def _rm_file(self, path, **kwargs):
        """Remove a reference entry; unknown paths are silently ignored."""
        self.references.pop(
            path, None
        )  # ignores FileNotFound, just as well for directories
        self.dircache.clear()  # this is a bit heavy handed
1188
+
1189
    async def _pipe_file(self, path, data, mode="overwrite", **kwargs):
        """Store *data* as an inline reference at *path*.

        ``mode="create"`` refuses to replace an existing entry by raising
        FileExistsError.
        """
        if mode == "create" and self.exists(path):
            raise FileExistsError
        # can be str or bytes
        self.references[path] = data
        self.dircache.clear()  # this is a bit heavy handed
1195
+
1196
    async def _put_file(self, lpath, rpath, mode="overwrite", **kwargs):
        """Read local file *lpath* and store its bytes as reference *rpath*.

        ``mode="create"`` refuses to replace an existing entry by raising
        FileExistsError.
        """
        # puts binary
        if mode == "create" and self.exists(rpath):
            raise FileExistsError
        with open(lpath, "rb") as f:
            self.references[rpath] = f.read()
        self.dircache.clear()  # this is a bit heavy handed
1203
+
1204
    def save_json(self, url, **storage_options):
        """Write modified references into new location.

        Output is a version-1 JSON spec.  Bytes values that decode as ASCII
        are stored as plain text; anything else is stored with the
        "base64:" prefix understood by the v1 loader.
        """
        out = {}
        for k, v in self.references.items():
            if isinstance(v, bytes):
                try:
                    out[k] = v.decode("ascii")
                except UnicodeDecodeError:
                    out[k] = (b"base64:" + base64.b64encode(v)).decode()
            else:
                out[k] = v
        with fsspec.open(url, "wb", **storage_options) as f:
            f.write(json.dumps({"version": 1, "refs": out}).encode())