ZTWHHH commited on
Commit
029995a
·
verified ·
1 Parent(s): 581842d

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +2 -0
  2. llava/lib/python3.10/collections/__pycache__/__init__.cpython-310.pyc +0 -0
  3. llava/lib/python3.10/ensurepip/__init__.py +294 -0
  4. llava/lib/python3.10/ensurepip/__main__.py +5 -0
  5. llava/lib/python3.10/ensurepip/__pycache__/__init__.cpython-310.pyc +0 -0
  6. llava/lib/python3.10/ensurepip/__pycache__/__main__.cpython-310.pyc +0 -0
  7. llava/lib/python3.10/ensurepip/__pycache__/_uninstall.cpython-310.pyc +0 -0
  8. llava/lib/python3.10/ensurepip/_bundled/__init__.py +0 -0
  9. llava/lib/python3.10/ensurepip/_bundled/__pycache__/__init__.cpython-310.pyc +0 -0
  10. llava/lib/python3.10/ensurepip/_bundled/setuptools-65.5.0-py3-none-any.whl +3 -0
  11. llava/lib/python3.10/ensurepip/_uninstall.py +31 -0
  12. llava/lib/python3.10/json/__init__.py +359 -0
  13. llava/lib/python3.10/json/decoder.py +356 -0
  14. llava/lib/python3.10/json/scanner.py +73 -0
  15. llava/lib/python3.10/json/tool.py +85 -0
  16. llava/lib/python3.10/multiprocessing/__init__.py +37 -0
  17. llava/lib/python3.10/multiprocessing/connection.py +973 -0
  18. llava/lib/python3.10/multiprocessing/context.py +376 -0
  19. llava/lib/python3.10/multiprocessing/forkserver.py +348 -0
  20. llava/lib/python3.10/multiprocessing/managers.py +1378 -0
  21. llava/lib/python3.10/multiprocessing/popen_forkserver.py +74 -0
  22. llava/lib/python3.10/multiprocessing/popen_spawn_posix.py +72 -0
  23. llava/lib/python3.10/multiprocessing/reduction.py +281 -0
  24. llava/lib/python3.10/multiprocessing/resource_sharer.py +154 -0
  25. llava/lib/python3.10/multiprocessing/shared_memory.py +534 -0
  26. llava/lib/python3.10/tkinter/__pycache__/__init__.cpython-310.pyc +3 -0
  27. llava/lib/python3.10/tkinter/__pycache__/commondialog.cpython-310.pyc +0 -0
  28. llava/lib/python3.10/tkinter/__pycache__/filedialog.cpython-310.pyc +0 -0
  29. llava/lib/python3.10/tkinter/__pycache__/simpledialog.cpython-310.pyc +0 -0
  30. llava/lib/python3.10/tkinter/__pycache__/tix.cpython-310.pyc +0 -0
  31. llava/lib/python3.10/tkinter/__pycache__/ttk.cpython-310.pyc +0 -0
  32. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool3d_native.h +24 -0
  33. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_amp_update_scale_compositeexplicitautograd_dispatch.h +25 -0
  34. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_assert_async.h +35 -0
  35. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_coalesce_ops.h +39 -0
  36. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_eigh_cuda_dispatch.h +25 -0
  37. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_native_batch_norm_legit_no_training.h +39 -0
  38. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_compute_contiguous_strides_offsets_ops.h +28 -0
  39. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_cudnn_attention_backward.h +47 -0
  40. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_flash_attention_for_cpu.h +30 -0
  41. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_bsc_tensor_unsafe_native.h +21 -0
  42. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csr_sum_ops.h +39 -0
  43. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_log_softmax_compositeimplicitautograd_dispatch.h +24 -0
  44. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_to_dense_compositeexplicitautograd_dispatch.h +24 -0
  45. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/abs_ops.h +50 -0
  46. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/argmin_ops.h +39 -0
  47. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/broadcast_to_compositeimplicitautograd_dispatch.h +24 -0
  48. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/conj_physical_cpu_dispatch.h +24 -0
  49. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_affine_grid_generator.h +39 -0
  50. parrot/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_grid_sampler_backward.h +39 -0
.gitattributes CHANGED
@@ -432,3 +432,5 @@ llava/lib/libstdc++.so filter=lfs diff=lfs merge=lfs -text
432
  llava/lib/libasan.so filter=lfs diff=lfs merge=lfs -text
433
  openflamingo/bin/python filter=lfs diff=lfs merge=lfs -text
434
  llava/lib/libtcl8.6.so filter=lfs diff=lfs merge=lfs -text
 
 
 
432
  llava/lib/libasan.so filter=lfs diff=lfs merge=lfs -text
433
  openflamingo/bin/python filter=lfs diff=lfs merge=lfs -text
434
  llava/lib/libtcl8.6.so filter=lfs diff=lfs merge=lfs -text
435
+ llava/lib/python3.10/ensurepip/_bundled/setuptools-65.5.0-py3-none-any.whl filter=lfs diff=lfs merge=lfs -text
436
+ llava/lib/python3.10/tkinter/__pycache__/__init__.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
llava/lib/python3.10/collections/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (48.4 kB). View file
 
llava/lib/python3.10/ensurepip/__init__.py ADDED
@@ -0,0 +1,294 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import collections
2
+ import os
3
+ import os.path
4
+ import subprocess
5
+ import sys
6
+ import sysconfig
7
+ import tempfile
8
+ from importlib import resources
9
+
10
+
11
+
12
+ __all__ = ["version", "bootstrap"]
13
+ _PACKAGE_NAMES = ('setuptools', 'pip')
14
+ _SETUPTOOLS_VERSION = "65.5.0"
15
+ _PIP_VERSION = "23.0.1"
16
+ _PROJECTS = [
17
+ ("setuptools", _SETUPTOOLS_VERSION, "py3"),
18
+ ("pip", _PIP_VERSION, "py3"),
19
+ ]
20
+
21
+ # Packages bundled in ensurepip._bundled have wheel_name set.
22
+ # Packages from WHEEL_PKG_DIR have wheel_path set.
23
+ _Package = collections.namedtuple('Package',
24
+ ('version', 'wheel_name', 'wheel_path'))
25
+
26
+ # Directory of system wheel packages. Some Linux distribution packaging
27
+ # policies recommend against bundling dependencies. For example, Fedora
28
+ # installs wheel packages in the /usr/share/python-wheels/ directory and don't
29
+ # install the ensurepip._bundled package.
30
+ _WHEEL_PKG_DIR = sysconfig.get_config_var('WHEEL_PKG_DIR')
31
+
32
+
33
+ def _find_packages(path):
34
+ packages = {}
35
+ try:
36
+ filenames = os.listdir(path)
37
+ except OSError:
38
+ # Ignore: path doesn't exist or permission error
39
+ filenames = ()
40
+ # Make the code deterministic if a directory contains multiple wheel files
41
+ # of the same package, but don't attempt to implement correct version
42
+ # comparison since this case should not happen.
43
+ filenames = sorted(filenames)
44
+ for filename in filenames:
45
+ # filename is like 'pip-21.2.4-py3-none-any.whl'
46
+ if not filename.endswith(".whl"):
47
+ continue
48
+ for name in _PACKAGE_NAMES:
49
+ prefix = name + '-'
50
+ if filename.startswith(prefix):
51
+ break
52
+ else:
53
+ continue
54
+
55
+ # Extract '21.2.4' from 'pip-21.2.4-py3-none-any.whl'
56
+ version = filename.removeprefix(prefix).partition('-')[0]
57
+ wheel_path = os.path.join(path, filename)
58
+ packages[name] = _Package(version, None, wheel_path)
59
+ return packages
60
+
61
+
62
+ def _get_packages():
63
+ global _PACKAGES, _WHEEL_PKG_DIR
64
+ if _PACKAGES is not None:
65
+ return _PACKAGES
66
+
67
+ packages = {}
68
+ for name, version, py_tag in _PROJECTS:
69
+ wheel_name = f"{name}-{version}-{py_tag}-none-any.whl"
70
+ packages[name] = _Package(version, wheel_name, None)
71
+ if _WHEEL_PKG_DIR:
72
+ dir_packages = _find_packages(_WHEEL_PKG_DIR)
73
+ # only used the wheel package directory if all packages are found there
74
+ if all(name in dir_packages for name in _PACKAGE_NAMES):
75
+ packages = dir_packages
76
+ _PACKAGES = packages
77
+ return packages
78
+ _PACKAGES = None
79
+
80
+
81
+ def _run_pip(args, additional_paths=None):
82
+ # Run the bootstraping in a subprocess to avoid leaking any state that happens
83
+ # after pip has executed. Particulary, this avoids the case when pip holds onto
84
+ # the files in *additional_paths*, preventing us to remove them at the end of the
85
+ # invocation.
86
+ code = f"""
87
+ import runpy
88
+ import sys
89
+ sys.path = {additional_paths or []} + sys.path
90
+ sys.argv[1:] = {args}
91
+ runpy.run_module("pip", run_name="__main__", alter_sys=True)
92
+ """
93
+
94
+ cmd = [
95
+ sys.executable,
96
+ '-W',
97
+ 'ignore::DeprecationWarning',
98
+ '-c',
99
+ code,
100
+ ]
101
+ if sys.flags.isolated:
102
+ # run code in isolated mode if currently running isolated
103
+ cmd.insert(1, '-I')
104
+ return subprocess.run(cmd, check=True).returncode
105
+
106
+
107
+ def version():
108
+ """
109
+ Returns a string specifying the bundled version of pip.
110
+ """
111
+ return _get_packages()['pip'].version
112
+
113
+
114
+ def _disable_pip_configuration_settings():
115
+ # We deliberately ignore all pip environment variables
116
+ # when invoking pip
117
+ # See http://bugs.python.org/issue19734 for details
118
+ keys_to_remove = [k for k in os.environ if k.startswith("PIP_")]
119
+ for k in keys_to_remove:
120
+ del os.environ[k]
121
+ # We also ignore the settings in the default pip configuration file
122
+ # See http://bugs.python.org/issue20053 for details
123
+ os.environ['PIP_CONFIG_FILE'] = os.devnull
124
+
125
+
126
+ def bootstrap(*, root=None, upgrade=False, user=False,
127
+ altinstall=False, default_pip=False,
128
+ verbosity=0):
129
+ """
130
+ Bootstrap pip into the current Python installation (or the given root
131
+ directory).
132
+
133
+ Note that calling this function will alter both sys.path and os.environ.
134
+ """
135
+ # Discard the return value
136
+ _bootstrap(root=root, upgrade=upgrade, user=user,
137
+ altinstall=altinstall, default_pip=default_pip,
138
+ verbosity=verbosity)
139
+
140
+
141
+ def _bootstrap(*, root=None, upgrade=False, user=False,
142
+ altinstall=False, default_pip=False,
143
+ verbosity=0):
144
+ """
145
+ Bootstrap pip into the current Python installation (or the given root
146
+ directory). Returns pip command status code.
147
+
148
+ Note that calling this function will alter both sys.path and os.environ.
149
+ """
150
+ if altinstall and default_pip:
151
+ raise ValueError("Cannot use altinstall and default_pip together")
152
+
153
+ sys.audit("ensurepip.bootstrap", root)
154
+
155
+ _disable_pip_configuration_settings()
156
+
157
+ # By default, installing pip and setuptools installs all of the
158
+ # following scripts (X.Y == running Python version):
159
+ #
160
+ # pip, pipX, pipX.Y, easy_install, easy_install-X.Y
161
+ #
162
+ # pip 1.5+ allows ensurepip to request that some of those be left out
163
+ if altinstall:
164
+ # omit pip, pipX and easy_install
165
+ os.environ["ENSUREPIP_OPTIONS"] = "altinstall"
166
+ elif not default_pip:
167
+ # omit pip and easy_install
168
+ os.environ["ENSUREPIP_OPTIONS"] = "install"
169
+
170
+ with tempfile.TemporaryDirectory() as tmpdir:
171
+ # Put our bundled wheels into a temporary directory and construct the
172
+ # additional paths that need added to sys.path
173
+ additional_paths = []
174
+ for name, package in _get_packages().items():
175
+ if package.wheel_name:
176
+ # Use bundled wheel package
177
+ from ensurepip import _bundled
178
+ wheel_name = package.wheel_name
179
+ whl = resources.read_binary(_bundled, wheel_name)
180
+ else:
181
+ # Use the wheel package directory
182
+ with open(package.wheel_path, "rb") as fp:
183
+ whl = fp.read()
184
+ wheel_name = os.path.basename(package.wheel_path)
185
+
186
+ filename = os.path.join(tmpdir, wheel_name)
187
+ with open(filename, "wb") as fp:
188
+ fp.write(whl)
189
+
190
+ additional_paths.append(filename)
191
+
192
+ # Construct the arguments to be passed to the pip command
193
+ args = ["install", "--no-cache-dir", "--no-index", "--find-links", tmpdir]
194
+ if root:
195
+ args += ["--root", root]
196
+ if upgrade:
197
+ args += ["--upgrade"]
198
+ if user:
199
+ args += ["--user"]
200
+ if verbosity:
201
+ args += ["-" + "v" * verbosity]
202
+
203
+ return _run_pip([*args, *_PACKAGE_NAMES], additional_paths)
204
+
205
+ def _uninstall_helper(*, verbosity=0):
206
+ """Helper to support a clean default uninstall process on Windows
207
+
208
+ Note that calling this function may alter os.environ.
209
+ """
210
+ # Nothing to do if pip was never installed, or has been removed
211
+ try:
212
+ import pip
213
+ except ImportError:
214
+ return
215
+
216
+ # If the installed pip version doesn't match the available one,
217
+ # leave it alone
218
+ available_version = version()
219
+ if pip.__version__ != available_version:
220
+ print(f"ensurepip will only uninstall a matching version "
221
+ f"({pip.__version__!r} installed, "
222
+ f"{available_version!r} available)",
223
+ file=sys.stderr)
224
+ return
225
+
226
+ _disable_pip_configuration_settings()
227
+
228
+ # Construct the arguments to be passed to the pip command
229
+ args = ["uninstall", "-y", "--disable-pip-version-check"]
230
+ if verbosity:
231
+ args += ["-" + "v" * verbosity]
232
+
233
+ return _run_pip([*args, *reversed(_PACKAGE_NAMES)])
234
+
235
+
236
+ def _main(argv=None):
237
+ import argparse
238
+ parser = argparse.ArgumentParser(prog="python -m ensurepip")
239
+ parser.add_argument(
240
+ "--version",
241
+ action="version",
242
+ version="pip {}".format(version()),
243
+ help="Show the version of pip that is bundled with this Python.",
244
+ )
245
+ parser.add_argument(
246
+ "-v", "--verbose",
247
+ action="count",
248
+ default=0,
249
+ dest="verbosity",
250
+ help=("Give more output. Option is additive, and can be used up to 3 "
251
+ "times."),
252
+ )
253
+ parser.add_argument(
254
+ "-U", "--upgrade",
255
+ action="store_true",
256
+ default=False,
257
+ help="Upgrade pip and dependencies, even if already installed.",
258
+ )
259
+ parser.add_argument(
260
+ "--user",
261
+ action="store_true",
262
+ default=False,
263
+ help="Install using the user scheme.",
264
+ )
265
+ parser.add_argument(
266
+ "--root",
267
+ default=None,
268
+ help="Install everything relative to this alternate root directory.",
269
+ )
270
+ parser.add_argument(
271
+ "--altinstall",
272
+ action="store_true",
273
+ default=False,
274
+ help=("Make an alternate install, installing only the X.Y versioned "
275
+ "scripts (Default: pipX, pipX.Y, easy_install-X.Y)."),
276
+ )
277
+ parser.add_argument(
278
+ "--default-pip",
279
+ action="store_true",
280
+ default=False,
281
+ help=("Make a default pip install, installing the unqualified pip "
282
+ "and easy_install in addition to the versioned scripts."),
283
+ )
284
+
285
+ args = parser.parse_args(argv)
286
+
287
+ return _bootstrap(
288
+ root=args.root,
289
+ upgrade=args.upgrade,
290
+ user=args.user,
291
+ verbosity=args.verbosity,
292
+ altinstall=args.altinstall,
293
+ default_pip=args.default_pip,
294
+ )
llava/lib/python3.10/ensurepip/__main__.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ import ensurepip
2
+ import sys
3
+
4
+ if __name__ == "__main__":
5
+ sys.exit(ensurepip._main())
llava/lib/python3.10/ensurepip/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (6.57 kB). View file
 
llava/lib/python3.10/ensurepip/__pycache__/__main__.cpython-310.pyc ADDED
Binary file (489 Bytes). View file
 
llava/lib/python3.10/ensurepip/__pycache__/_uninstall.cpython-310.pyc ADDED
Binary file (1.2 kB). View file
 
llava/lib/python3.10/ensurepip/_bundled/__init__.py ADDED
File without changes
llava/lib/python3.10/ensurepip/_bundled/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (397 Bytes). View file
 
llava/lib/python3.10/ensurepip/_bundled/setuptools-65.5.0-py3-none-any.whl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f62ea9da9ed6289bfe868cd6845968a2c854d1427f8548d52cae02a42b4f0356
3
+ size 1232695
llava/lib/python3.10/ensurepip/_uninstall.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Basic pip uninstallation support, helper for the Windows uninstaller"""
2
+
3
+ import argparse
4
+ import ensurepip
5
+ import sys
6
+
7
+
8
+ def _main(argv=None):
9
+ parser = argparse.ArgumentParser(prog="python -m ensurepip._uninstall")
10
+ parser.add_argument(
11
+ "--version",
12
+ action="version",
13
+ version="pip {}".format(ensurepip.version()),
14
+ help="Show the version of pip this will attempt to uninstall.",
15
+ )
16
+ parser.add_argument(
17
+ "-v", "--verbose",
18
+ action="count",
19
+ default=0,
20
+ dest="verbosity",
21
+ help=("Give more output. Option is additive, and can be used up to 3 "
22
+ "times."),
23
+ )
24
+
25
+ args = parser.parse_args(argv)
26
+
27
+ return ensurepip._uninstall_helper(verbosity=args.verbosity)
28
+
29
+
30
+ if __name__ == "__main__":
31
+ sys.exit(_main())
llava/lib/python3.10/json/__init__.py ADDED
@@ -0,0 +1,359 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ r"""JSON (JavaScript Object Notation) <https://json.org> is a subset of
2
+ JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
3
+ interchange format.
4
+
5
+ :mod:`json` exposes an API familiar to users of the standard library
6
+ :mod:`marshal` and :mod:`pickle` modules. It is derived from a
7
+ version of the externally maintained simplejson library.
8
+
9
+ Encoding basic Python object hierarchies::
10
+
11
+ >>> import json
12
+ >>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
13
+ '["foo", {"bar": ["baz", null, 1.0, 2]}]'
14
+ >>> print(json.dumps("\"foo\bar"))
15
+ "\"foo\bar"
16
+ >>> print(json.dumps('\u1234'))
17
+ "\u1234"
18
+ >>> print(json.dumps('\\'))
19
+ "\\"
20
+ >>> print(json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True))
21
+ {"a": 0, "b": 0, "c": 0}
22
+ >>> from io import StringIO
23
+ >>> io = StringIO()
24
+ >>> json.dump(['streaming API'], io)
25
+ >>> io.getvalue()
26
+ '["streaming API"]'
27
+
28
+ Compact encoding::
29
+
30
+ >>> import json
31
+ >>> mydict = {'4': 5, '6': 7}
32
+ >>> json.dumps([1,2,3,mydict], separators=(',', ':'))
33
+ '[1,2,3,{"4":5,"6":7}]'
34
+
35
+ Pretty printing::
36
+
37
+ >>> import json
38
+ >>> print(json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4))
39
+ {
40
+ "4": 5,
41
+ "6": 7
42
+ }
43
+
44
+ Decoding JSON::
45
+
46
+ >>> import json
47
+ >>> obj = ['foo', {'bar': ['baz', None, 1.0, 2]}]
48
+ >>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
49
+ True
50
+ >>> json.loads('"\\"foo\\bar"') == '"foo\x08ar'
51
+ True
52
+ >>> from io import StringIO
53
+ >>> io = StringIO('["streaming API"]')
54
+ >>> json.load(io)[0] == 'streaming API'
55
+ True
56
+
57
+ Specializing JSON object decoding::
58
+
59
+ >>> import json
60
+ >>> def as_complex(dct):
61
+ ... if '__complex__' in dct:
62
+ ... return complex(dct['real'], dct['imag'])
63
+ ... return dct
64
+ ...
65
+ >>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
66
+ ... object_hook=as_complex)
67
+ (1+2j)
68
+ >>> from decimal import Decimal
69
+ >>> json.loads('1.1', parse_float=Decimal) == Decimal('1.1')
70
+ True
71
+
72
+ Specializing JSON object encoding::
73
+
74
+ >>> import json
75
+ >>> def encode_complex(obj):
76
+ ... if isinstance(obj, complex):
77
+ ... return [obj.real, obj.imag]
78
+ ... raise TypeError(f'Object of type {obj.__class__.__name__} '
79
+ ... f'is not JSON serializable')
80
+ ...
81
+ >>> json.dumps(2 + 1j, default=encode_complex)
82
+ '[2.0, 1.0]'
83
+ >>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
84
+ '[2.0, 1.0]'
85
+ >>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
86
+ '[2.0, 1.0]'
87
+
88
+
89
+ Using json.tool from the shell to validate and pretty-print::
90
+
91
+ $ echo '{"json":"obj"}' | python -m json.tool
92
+ {
93
+ "json": "obj"
94
+ }
95
+ $ echo '{ 1.2:3.4}' | python -m json.tool
96
+ Expecting property name enclosed in double quotes: line 1 column 3 (char 2)
97
+ """
98
+ __version__ = '2.0.9'
99
+ __all__ = [
100
+ 'dump', 'dumps', 'load', 'loads',
101
+ 'JSONDecoder', 'JSONDecodeError', 'JSONEncoder',
102
+ ]
103
+
104
+ __author__ = 'Bob Ippolito <bob@redivi.com>'
105
+
106
+ from .decoder import JSONDecoder, JSONDecodeError
107
+ from .encoder import JSONEncoder
108
+ import codecs
109
+
110
+ _default_encoder = JSONEncoder(
111
+ skipkeys=False,
112
+ ensure_ascii=True,
113
+ check_circular=True,
114
+ allow_nan=True,
115
+ indent=None,
116
+ separators=None,
117
+ default=None,
118
+ )
119
+
120
+ def dump(obj, fp, *, skipkeys=False, ensure_ascii=True, check_circular=True,
121
+ allow_nan=True, cls=None, indent=None, separators=None,
122
+ default=None, sort_keys=False, **kw):
123
+ """Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
124
+ ``.write()``-supporting file-like object).
125
+
126
+ If ``skipkeys`` is true then ``dict`` keys that are not basic types
127
+ (``str``, ``int``, ``float``, ``bool``, ``None``) will be skipped
128
+ instead of raising a ``TypeError``.
129
+
130
+ If ``ensure_ascii`` is false, then the strings written to ``fp`` can
131
+ contain non-ASCII characters if they appear in strings contained in
132
+ ``obj``. Otherwise, all such characters are escaped in JSON strings.
133
+
134
+ If ``check_circular`` is false, then the circular reference check
135
+ for container types will be skipped and a circular reference will
136
+ result in an ``RecursionError`` (or worse).
137
+
138
+ If ``allow_nan`` is false, then it will be a ``ValueError`` to
139
+ serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
140
+ in strict compliance of the JSON specification, instead of using the
141
+ JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
142
+
143
+ If ``indent`` is a non-negative integer, then JSON array elements and
144
+ object members will be pretty-printed with that indent level. An indent
145
+ level of 0 will only insert newlines. ``None`` is the most compact
146
+ representation.
147
+
148
+ If specified, ``separators`` should be an ``(item_separator, key_separator)``
149
+ tuple. The default is ``(', ', ': ')`` if *indent* is ``None`` and
150
+ ``(',', ': ')`` otherwise. To get the most compact JSON representation,
151
+ you should specify ``(',', ':')`` to eliminate whitespace.
152
+
153
+ ``default(obj)`` is a function that should return a serializable version
154
+ of obj or raise TypeError. The default simply raises TypeError.
155
+
156
+ If *sort_keys* is true (default: ``False``), then the output of
157
+ dictionaries will be sorted by key.
158
+
159
+ To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
160
+ ``.default()`` method to serialize additional types), specify it with
161
+ the ``cls`` kwarg; otherwise ``JSONEncoder`` is used.
162
+
163
+ """
164
+ # cached encoder
165
+ if (not skipkeys and ensure_ascii and
166
+ check_circular and allow_nan and
167
+ cls is None and indent is None and separators is None and
168
+ default is None and not sort_keys and not kw):
169
+ iterable = _default_encoder.iterencode(obj)
170
+ else:
171
+ if cls is None:
172
+ cls = JSONEncoder
173
+ iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
174
+ check_circular=check_circular, allow_nan=allow_nan, indent=indent,
175
+ separators=separators,
176
+ default=default, sort_keys=sort_keys, **kw).iterencode(obj)
177
+ # could accelerate with writelines in some versions of Python, at
178
+ # a debuggability cost
179
+ for chunk in iterable:
180
+ fp.write(chunk)
181
+
182
+
183
+ def dumps(obj, *, skipkeys=False, ensure_ascii=True, check_circular=True,
184
+ allow_nan=True, cls=None, indent=None, separators=None,
185
+ default=None, sort_keys=False, **kw):
186
+ """Serialize ``obj`` to a JSON formatted ``str``.
187
+
188
+ If ``skipkeys`` is true then ``dict`` keys that are not basic types
189
+ (``str``, ``int``, ``float``, ``bool``, ``None``) will be skipped
190
+ instead of raising a ``TypeError``.
191
+
192
+ If ``ensure_ascii`` is false, then the return value can contain non-ASCII
193
+ characters if they appear in strings contained in ``obj``. Otherwise, all
194
+ such characters are escaped in JSON strings.
195
+
196
+ If ``check_circular`` is false, then the circular reference check
197
+ for container types will be skipped and a circular reference will
198
+ result in an ``RecursionError`` (or worse).
199
+
200
+ If ``allow_nan`` is false, then it will be a ``ValueError`` to
201
+ serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
202
+ strict compliance of the JSON specification, instead of using the
203
+ JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
204
+
205
+ If ``indent`` is a non-negative integer, then JSON array elements and
206
+ object members will be pretty-printed with that indent level. An indent
207
+ level of 0 will only insert newlines. ``None`` is the most compact
208
+ representation.
209
+
210
+ If specified, ``separators`` should be an ``(item_separator, key_separator)``
211
+ tuple. The default is ``(', ', ': ')`` if *indent* is ``None`` and
212
+ ``(',', ': ')`` otherwise. To get the most compact JSON representation,
213
+ you should specify ``(',', ':')`` to eliminate whitespace.
214
+
215
+ ``default(obj)`` is a function that should return a serializable version
216
+ of obj or raise TypeError. The default simply raises TypeError.
217
+
218
+ If *sort_keys* is true (default: ``False``), then the output of
219
+ dictionaries will be sorted by key.
220
+
221
+ To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
222
+ ``.default()`` method to serialize additional types), specify it with
223
+ the ``cls`` kwarg; otherwise ``JSONEncoder`` is used.
224
+
225
+ """
226
+ # cached encoder
227
+ if (not skipkeys and ensure_ascii and
228
+ check_circular and allow_nan and
229
+ cls is None and indent is None and separators is None and
230
+ default is None and not sort_keys and not kw):
231
+ return _default_encoder.encode(obj)
232
+ if cls is None:
233
+ cls = JSONEncoder
234
+ return cls(
235
+ skipkeys=skipkeys, ensure_ascii=ensure_ascii,
236
+ check_circular=check_circular, allow_nan=allow_nan, indent=indent,
237
+ separators=separators, default=default, sort_keys=sort_keys,
238
+ **kw).encode(obj)
239
+
240
+
241
+ _default_decoder = JSONDecoder(object_hook=None, object_pairs_hook=None)
242
+
243
+
244
+ def detect_encoding(b):
245
+ bstartswith = b.startswith
246
+ if bstartswith((codecs.BOM_UTF32_BE, codecs.BOM_UTF32_LE)):
247
+ return 'utf-32'
248
+ if bstartswith((codecs.BOM_UTF16_BE, codecs.BOM_UTF16_LE)):
249
+ return 'utf-16'
250
+ if bstartswith(codecs.BOM_UTF8):
251
+ return 'utf-8-sig'
252
+
253
+ if len(b) >= 4:
254
+ if not b[0]:
255
+ # 00 00 -- -- - utf-32-be
256
+ # 00 XX -- -- - utf-16-be
257
+ return 'utf-16-be' if b[1] else 'utf-32-be'
258
+ if not b[1]:
259
+ # XX 00 00 00 - utf-32-le
260
+ # XX 00 00 XX - utf-16-le
261
+ # XX 00 XX -- - utf-16-le
262
+ return 'utf-16-le' if b[2] or b[3] else 'utf-32-le'
263
+ elif len(b) == 2:
264
+ if not b[0]:
265
+ # 00 XX - utf-16-be
266
+ return 'utf-16-be'
267
+ if not b[1]:
268
+ # XX 00 - utf-16-le
269
+ return 'utf-16-le'
270
+ # default
271
+ return 'utf-8'
272
+
273
+
274
+ def load(fp, *, cls=None, object_hook=None, parse_float=None,
275
+ parse_int=None, parse_constant=None, object_pairs_hook=None, **kw):
276
+ """Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
277
+ a JSON document) to a Python object.
278
+
279
+ ``object_hook`` is an optional function that will be called with the
280
+ result of any object literal decode (a ``dict``). The return value of
281
+ ``object_hook`` will be used instead of the ``dict``. This feature
282
+ can be used to implement custom decoders (e.g. JSON-RPC class hinting).
283
+
284
+ ``object_pairs_hook`` is an optional function that will be called with the
285
+ result of any object literal decoded with an ordered list of pairs. The
286
+ return value of ``object_pairs_hook`` will be used instead of the ``dict``.
287
+ This feature can be used to implement custom decoders. If ``object_hook``
288
+ is also defined, the ``object_pairs_hook`` takes priority.
289
+
290
+ To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
291
+ kwarg; otherwise ``JSONDecoder`` is used.
292
+ """
293
+ return loads(fp.read(),
294
+ cls=cls, object_hook=object_hook,
295
+ parse_float=parse_float, parse_int=parse_int,
296
+ parse_constant=parse_constant, object_pairs_hook=object_pairs_hook, **kw)
297
+
298
+
299
+ def loads(s, *, cls=None, object_hook=None, parse_float=None,
300
+ parse_int=None, parse_constant=None, object_pairs_hook=None, **kw):
301
+ """Deserialize ``s`` (a ``str``, ``bytes`` or ``bytearray`` instance
302
+ containing a JSON document) to a Python object.
303
+
304
+ ``object_hook`` is an optional function that will be called with the
305
+ result of any object literal decode (a ``dict``). The return value of
306
+ ``object_hook`` will be used instead of the ``dict``. This feature
307
+ can be used to implement custom decoders (e.g. JSON-RPC class hinting).
308
+
309
+ ``object_pairs_hook`` is an optional function that will be called with the
310
+ result of any object literal decoded with an ordered list of pairs. The
311
+ return value of ``object_pairs_hook`` will be used instead of the ``dict``.
312
+ This feature can be used to implement custom decoders. If ``object_hook``
313
+ is also defined, the ``object_pairs_hook`` takes priority.
314
+
315
+ ``parse_float``, if specified, will be called with the string
316
+ of every JSON float to be decoded. By default this is equivalent to
317
+ float(num_str). This can be used to use another datatype or parser
318
+ for JSON floats (e.g. decimal.Decimal).
319
+
320
+ ``parse_int``, if specified, will be called with the string
321
+ of every JSON int to be decoded. By default this is equivalent to
322
+ int(num_str). This can be used to use another datatype or parser
323
+ for JSON integers (e.g. float).
324
+
325
+ ``parse_constant``, if specified, will be called with one of the
326
+ following strings: -Infinity, Infinity, NaN.
327
+ This can be used to raise an exception if invalid JSON numbers
328
+ are encountered.
329
+
330
+ To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
331
+ kwarg; otherwise ``JSONDecoder`` is used.
332
+ """
333
+ if isinstance(s, str):
334
+ if s.startswith('\ufeff'):
335
+ raise JSONDecodeError("Unexpected UTF-8 BOM (decode using utf-8-sig)",
336
+ s, 0)
337
+ else:
338
+ if not isinstance(s, (bytes, bytearray)):
339
+ raise TypeError(f'the JSON object must be str, bytes or bytearray, '
340
+ f'not {s.__class__.__name__}')
341
+ s = s.decode(detect_encoding(s), 'surrogatepass')
342
+
343
+ if (cls is None and object_hook is None and
344
+ parse_int is None and parse_float is None and
345
+ parse_constant is None and object_pairs_hook is None and not kw):
346
+ return _default_decoder.decode(s)
347
+ if cls is None:
348
+ cls = JSONDecoder
349
+ if object_hook is not None:
350
+ kw['object_hook'] = object_hook
351
+ if object_pairs_hook is not None:
352
+ kw['object_pairs_hook'] = object_pairs_hook
353
+ if parse_float is not None:
354
+ kw['parse_float'] = parse_float
355
+ if parse_int is not None:
356
+ kw['parse_int'] = parse_int
357
+ if parse_constant is not None:
358
+ kw['parse_constant'] = parse_constant
359
+ return cls(**kw).decode(s)
llava/lib/python3.10/json/decoder.py ADDED
@@ -0,0 +1,356 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Implementation of JSONDecoder
2
+ """
3
+ import re
4
+
5
+ from json import scanner
6
+ try:
7
+ from _json import scanstring as c_scanstring
8
+ except ImportError:
9
+ c_scanstring = None
10
+
11
+ __all__ = ['JSONDecoder', 'JSONDecodeError']
12
+
13
+ FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
14
+
15
+ NaN = float('nan')
16
+ PosInf = float('inf')
17
+ NegInf = float('-inf')
18
+
19
+
20
+ class JSONDecodeError(ValueError):
21
+ """Subclass of ValueError with the following additional properties:
22
+
23
+ msg: The unformatted error message
24
+ doc: The JSON document being parsed
25
+ pos: The start index of doc where parsing failed
26
+ lineno: The line corresponding to pos
27
+ colno: The column corresponding to pos
28
+
29
+ """
30
+ # Note that this exception is used from _json
31
+ def __init__(self, msg, doc, pos):
32
+ lineno = doc.count('\n', 0, pos) + 1
33
+ colno = pos - doc.rfind('\n', 0, pos)
34
+ errmsg = '%s: line %d column %d (char %d)' % (msg, lineno, colno, pos)
35
+ ValueError.__init__(self, errmsg)
36
+ self.msg = msg
37
+ self.doc = doc
38
+ self.pos = pos
39
+ self.lineno = lineno
40
+ self.colno = colno
41
+
42
+ def __reduce__(self):
43
+ return self.__class__, (self.msg, self.doc, self.pos)
44
+
45
+
46
+ _CONSTANTS = {
47
+ '-Infinity': NegInf,
48
+ 'Infinity': PosInf,
49
+ 'NaN': NaN,
50
+ }
51
+
52
+
53
+ STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
54
+ BACKSLASH = {
55
+ '"': '"', '\\': '\\', '/': '/',
56
+ 'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t',
57
+ }
58
+
59
+ def _decode_uXXXX(s, pos):
60
+ esc = s[pos + 1:pos + 5]
61
+ if len(esc) == 4 and esc[1] not in 'xX':
62
+ try:
63
+ return int(esc, 16)
64
+ except ValueError:
65
+ pass
66
+ msg = "Invalid \\uXXXX escape"
67
+ raise JSONDecodeError(msg, s, pos)
68
+
69
+ def py_scanstring(s, end, strict=True,
70
+ _b=BACKSLASH, _m=STRINGCHUNK.match):
71
+ """Scan the string s for a JSON string. End is the index of the
72
+ character in s after the quote that started the JSON string.
73
+ Unescapes all valid JSON string escape sequences and raises ValueError
74
+ on attempt to decode an invalid string. If strict is False then literal
75
+ control characters are allowed in the string.
76
+
77
+ Returns a tuple of the decoded string and the index of the character in s
78
+ after the end quote."""
79
+ chunks = []
80
+ _append = chunks.append
81
+ begin = end - 1
82
+ while 1:
83
+ chunk = _m(s, end)
84
+ if chunk is None:
85
+ raise JSONDecodeError("Unterminated string starting at", s, begin)
86
+ end = chunk.end()
87
+ content, terminator = chunk.groups()
88
+ # Content is contains zero or more unescaped string characters
89
+ if content:
90
+ _append(content)
91
+ # Terminator is the end of string, a literal control character,
92
+ # or a backslash denoting that an escape sequence follows
93
+ if terminator == '"':
94
+ break
95
+ elif terminator != '\\':
96
+ if strict:
97
+ #msg = "Invalid control character %r at" % (terminator,)
98
+ msg = "Invalid control character {0!r} at".format(terminator)
99
+ raise JSONDecodeError(msg, s, end)
100
+ else:
101
+ _append(terminator)
102
+ continue
103
+ try:
104
+ esc = s[end]
105
+ except IndexError:
106
+ raise JSONDecodeError("Unterminated string starting at",
107
+ s, begin) from None
108
+ # If not a unicode escape sequence, must be in the lookup table
109
+ if esc != 'u':
110
+ try:
111
+ char = _b[esc]
112
+ except KeyError:
113
+ msg = "Invalid \\escape: {0!r}".format(esc)
114
+ raise JSONDecodeError(msg, s, end)
115
+ end += 1
116
+ else:
117
+ uni = _decode_uXXXX(s, end)
118
+ end += 5
119
+ if 0xd800 <= uni <= 0xdbff and s[end:end + 2] == '\\u':
120
+ uni2 = _decode_uXXXX(s, end + 1)
121
+ if 0xdc00 <= uni2 <= 0xdfff:
122
+ uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
123
+ end += 6
124
+ char = chr(uni)
125
+ _append(char)
126
+ return ''.join(chunks), end
127
+
128
+
129
+ # Use speedup if available
130
+ scanstring = c_scanstring or py_scanstring
131
+
132
+ WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
133
+ WHITESPACE_STR = ' \t\n\r'
134
+
135
+
136
+ def JSONObject(s_and_end, strict, scan_once, object_hook, object_pairs_hook,
137
+ memo=None, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
138
+ s, end = s_and_end
139
+ pairs = []
140
+ pairs_append = pairs.append
141
+ # Backwards compatibility
142
+ if memo is None:
143
+ memo = {}
144
+ memo_get = memo.setdefault
145
+ # Use a slice to prevent IndexError from being raised, the following
146
+ # check will raise a more specific ValueError if the string is empty
147
+ nextchar = s[end:end + 1]
148
+ # Normally we expect nextchar == '"'
149
+ if nextchar != '"':
150
+ if nextchar in _ws:
151
+ end = _w(s, end).end()
152
+ nextchar = s[end:end + 1]
153
+ # Trivial empty object
154
+ if nextchar == '}':
155
+ if object_pairs_hook is not None:
156
+ result = object_pairs_hook(pairs)
157
+ return result, end + 1
158
+ pairs = {}
159
+ if object_hook is not None:
160
+ pairs = object_hook(pairs)
161
+ return pairs, end + 1
162
+ elif nextchar != '"':
163
+ raise JSONDecodeError(
164
+ "Expecting property name enclosed in double quotes", s, end)
165
+ end += 1
166
+ while True:
167
+ key, end = scanstring(s, end, strict)
168
+ key = memo_get(key, key)
169
+ # To skip some function call overhead we optimize the fast paths where
170
+ # the JSON key separator is ": " or just ":".
171
+ if s[end:end + 1] != ':':
172
+ end = _w(s, end).end()
173
+ if s[end:end + 1] != ':':
174
+ raise JSONDecodeError("Expecting ':' delimiter", s, end)
175
+ end += 1
176
+
177
+ try:
178
+ if s[end] in _ws:
179
+ end += 1
180
+ if s[end] in _ws:
181
+ end = _w(s, end + 1).end()
182
+ except IndexError:
183
+ pass
184
+
185
+ try:
186
+ value, end = scan_once(s, end)
187
+ except StopIteration as err:
188
+ raise JSONDecodeError("Expecting value", s, err.value) from None
189
+ pairs_append((key, value))
190
+ try:
191
+ nextchar = s[end]
192
+ if nextchar in _ws:
193
+ end = _w(s, end + 1).end()
194
+ nextchar = s[end]
195
+ except IndexError:
196
+ nextchar = ''
197
+ end += 1
198
+
199
+ if nextchar == '}':
200
+ break
201
+ elif nextchar != ',':
202
+ raise JSONDecodeError("Expecting ',' delimiter", s, end - 1)
203
+ end = _w(s, end).end()
204
+ nextchar = s[end:end + 1]
205
+ end += 1
206
+ if nextchar != '"':
207
+ raise JSONDecodeError(
208
+ "Expecting property name enclosed in double quotes", s, end - 1)
209
+ if object_pairs_hook is not None:
210
+ result = object_pairs_hook(pairs)
211
+ return result, end
212
+ pairs = dict(pairs)
213
+ if object_hook is not None:
214
+ pairs = object_hook(pairs)
215
+ return pairs, end
216
+
217
+ def JSONArray(s_and_end, scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
218
+ s, end = s_and_end
219
+ values = []
220
+ nextchar = s[end:end + 1]
221
+ if nextchar in _ws:
222
+ end = _w(s, end + 1).end()
223
+ nextchar = s[end:end + 1]
224
+ # Look-ahead for trivial empty array
225
+ if nextchar == ']':
226
+ return values, end + 1
227
+ _append = values.append
228
+ while True:
229
+ try:
230
+ value, end = scan_once(s, end)
231
+ except StopIteration as err:
232
+ raise JSONDecodeError("Expecting value", s, err.value) from None
233
+ _append(value)
234
+ nextchar = s[end:end + 1]
235
+ if nextchar in _ws:
236
+ end = _w(s, end + 1).end()
237
+ nextchar = s[end:end + 1]
238
+ end += 1
239
+ if nextchar == ']':
240
+ break
241
+ elif nextchar != ',':
242
+ raise JSONDecodeError("Expecting ',' delimiter", s, end - 1)
243
+ try:
244
+ if s[end] in _ws:
245
+ end += 1
246
+ if s[end] in _ws:
247
+ end = _w(s, end + 1).end()
248
+ except IndexError:
249
+ pass
250
+
251
+ return values, end
252
+
253
+
254
+ class JSONDecoder(object):
255
+ """Simple JSON <https://json.org> decoder
256
+
257
+ Performs the following translations in decoding by default:
258
+
259
+ +---------------+-------------------+
260
+ | JSON | Python |
261
+ +===============+===================+
262
+ | object | dict |
263
+ +---------------+-------------------+
264
+ | array | list |
265
+ +---------------+-------------------+
266
+ | string | str |
267
+ +---------------+-------------------+
268
+ | number (int) | int |
269
+ +---------------+-------------------+
270
+ | number (real) | float |
271
+ +---------------+-------------------+
272
+ | true | True |
273
+ +---------------+-------------------+
274
+ | false | False |
275
+ +---------------+-------------------+
276
+ | null | None |
277
+ +---------------+-------------------+
278
+
279
+ It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
280
+ their corresponding ``float`` values, which is outside the JSON spec.
281
+
282
+ """
283
+
284
+ def __init__(self, *, object_hook=None, parse_float=None,
285
+ parse_int=None, parse_constant=None, strict=True,
286
+ object_pairs_hook=None):
287
+ """``object_hook``, if specified, will be called with the result
288
+ of every JSON object decoded and its return value will be used in
289
+ place of the given ``dict``. This can be used to provide custom
290
+ deserializations (e.g. to support JSON-RPC class hinting).
291
+
292
+ ``object_pairs_hook``, if specified will be called with the result of
293
+ every JSON object decoded with an ordered list of pairs. The return
294
+ value of ``object_pairs_hook`` will be used instead of the ``dict``.
295
+ This feature can be used to implement custom decoders.
296
+ If ``object_hook`` is also defined, the ``object_pairs_hook`` takes
297
+ priority.
298
+
299
+ ``parse_float``, if specified, will be called with the string
300
+ of every JSON float to be decoded. By default this is equivalent to
301
+ float(num_str). This can be used to use another datatype or parser
302
+ for JSON floats (e.g. decimal.Decimal).
303
+
304
+ ``parse_int``, if specified, will be called with the string
305
+ of every JSON int to be decoded. By default this is equivalent to
306
+ int(num_str). This can be used to use another datatype or parser
307
+ for JSON integers (e.g. float).
308
+
309
+ ``parse_constant``, if specified, will be called with one of the
310
+ following strings: -Infinity, Infinity, NaN.
311
+ This can be used to raise an exception if invalid JSON numbers
312
+ are encountered.
313
+
314
+ If ``strict`` is false (true is the default), then control
315
+ characters will be allowed inside strings. Control characters in
316
+ this context are those with character codes in the 0-31 range,
317
+ including ``'\\t'`` (tab), ``'\\n'``, ``'\\r'`` and ``'\\0'``.
318
+ """
319
+ self.object_hook = object_hook
320
+ self.parse_float = parse_float or float
321
+ self.parse_int = parse_int or int
322
+ self.parse_constant = parse_constant or _CONSTANTS.__getitem__
323
+ self.strict = strict
324
+ self.object_pairs_hook = object_pairs_hook
325
+ self.parse_object = JSONObject
326
+ self.parse_array = JSONArray
327
+ self.parse_string = scanstring
328
+ self.memo = {}
329
+ self.scan_once = scanner.make_scanner(self)
330
+
331
+
332
+ def decode(self, s, _w=WHITESPACE.match):
333
+ """Return the Python representation of ``s`` (a ``str`` instance
334
+ containing a JSON document).
335
+
336
+ """
337
+ obj, end = self.raw_decode(s, idx=_w(s, 0).end())
338
+ end = _w(s, end).end()
339
+ if end != len(s):
340
+ raise JSONDecodeError("Extra data", s, end)
341
+ return obj
342
+
343
+ def raw_decode(self, s, idx=0):
344
+ """Decode a JSON document from ``s`` (a ``str`` beginning with
345
+ a JSON document) and return a 2-tuple of the Python
346
+ representation and the index in ``s`` where the document ended.
347
+
348
+ This can be used to decode a JSON document from a string that may
349
+ have extraneous data at the end.
350
+
351
+ """
352
+ try:
353
+ obj, end = self.scan_once(s, idx)
354
+ except StopIteration as err:
355
+ raise JSONDecodeError("Expecting value", s, err.value) from None
356
+ return obj, end
llava/lib/python3.10/json/scanner.py ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """JSON token scanner
2
+ """
3
+ import re
4
+ try:
5
+ from _json import make_scanner as c_make_scanner
6
+ except ImportError:
7
+ c_make_scanner = None
8
+
9
+ __all__ = ['make_scanner']
10
+
11
+ NUMBER_RE = re.compile(
12
+ r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
13
+ (re.VERBOSE | re.MULTILINE | re.DOTALL))
14
+
15
+ def py_make_scanner(context):
16
+ parse_object = context.parse_object
17
+ parse_array = context.parse_array
18
+ parse_string = context.parse_string
19
+ match_number = NUMBER_RE.match
20
+ strict = context.strict
21
+ parse_float = context.parse_float
22
+ parse_int = context.parse_int
23
+ parse_constant = context.parse_constant
24
+ object_hook = context.object_hook
25
+ object_pairs_hook = context.object_pairs_hook
26
+ memo = context.memo
27
+
28
+ def _scan_once(string, idx):
29
+ try:
30
+ nextchar = string[idx]
31
+ except IndexError:
32
+ raise StopIteration(idx) from None
33
+
34
+ if nextchar == '"':
35
+ return parse_string(string, idx + 1, strict)
36
+ elif nextchar == '{':
37
+ return parse_object((string, idx + 1), strict,
38
+ _scan_once, object_hook, object_pairs_hook, memo)
39
+ elif nextchar == '[':
40
+ return parse_array((string, idx + 1), _scan_once)
41
+ elif nextchar == 'n' and string[idx:idx + 4] == 'null':
42
+ return None, idx + 4
43
+ elif nextchar == 't' and string[idx:idx + 4] == 'true':
44
+ return True, idx + 4
45
+ elif nextchar == 'f' and string[idx:idx + 5] == 'false':
46
+ return False, idx + 5
47
+
48
+ m = match_number(string, idx)
49
+ if m is not None:
50
+ integer, frac, exp = m.groups()
51
+ if frac or exp:
52
+ res = parse_float(integer + (frac or '') + (exp or ''))
53
+ else:
54
+ res = parse_int(integer)
55
+ return res, m.end()
56
+ elif nextchar == 'N' and string[idx:idx + 3] == 'NaN':
57
+ return parse_constant('NaN'), idx + 3
58
+ elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity':
59
+ return parse_constant('Infinity'), idx + 8
60
+ elif nextchar == '-' and string[idx:idx + 9] == '-Infinity':
61
+ return parse_constant('-Infinity'), idx + 9
62
+ else:
63
+ raise StopIteration(idx)
64
+
65
+ def scan_once(string, idx):
66
+ try:
67
+ return _scan_once(string, idx)
68
+ finally:
69
+ memo.clear()
70
+
71
+ return scan_once
72
+
73
+ make_scanner = c_make_scanner or py_make_scanner
llava/lib/python3.10/json/tool.py ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ r"""Command-line tool to validate and pretty-print JSON
2
+
3
+ Usage::
4
+
5
+ $ echo '{"json":"obj"}' | python -m json.tool
6
+ {
7
+ "json": "obj"
8
+ }
9
+ $ echo '{ 1.2:3.4}' | python -m json.tool
10
+ Expecting property name enclosed in double quotes: line 1 column 3 (char 2)
11
+
12
+ """
13
+ import argparse
14
+ import json
15
+ import sys
16
+ from pathlib import Path
17
+
18
+
19
+ def main():
20
+ prog = 'python -m json.tool'
21
+ description = ('A simple command line interface for json module '
22
+ 'to validate and pretty-print JSON objects.')
23
+ parser = argparse.ArgumentParser(prog=prog, description=description)
24
+ parser.add_argument('infile', nargs='?',
25
+ type=argparse.FileType(encoding="utf-8"),
26
+ help='a JSON file to be validated or pretty-printed',
27
+ default=sys.stdin)
28
+ parser.add_argument('outfile', nargs='?',
29
+ type=Path,
30
+ help='write the output of infile to outfile',
31
+ default=None)
32
+ parser.add_argument('--sort-keys', action='store_true', default=False,
33
+ help='sort the output of dictionaries alphabetically by key')
34
+ parser.add_argument('--no-ensure-ascii', dest='ensure_ascii', action='store_false',
35
+ help='disable escaping of non-ASCII characters')
36
+ parser.add_argument('--json-lines', action='store_true', default=False,
37
+ help='parse input using the JSON Lines format. '
38
+ 'Use with --no-indent or --compact to produce valid JSON Lines output.')
39
+ group = parser.add_mutually_exclusive_group()
40
+ group.add_argument('--indent', default=4, type=int,
41
+ help='separate items with newlines and use this number '
42
+ 'of spaces for indentation')
43
+ group.add_argument('--tab', action='store_const', dest='indent',
44
+ const='\t', help='separate items with newlines and use '
45
+ 'tabs for indentation')
46
+ group.add_argument('--no-indent', action='store_const', dest='indent',
47
+ const=None,
48
+ help='separate items with spaces rather than newlines')
49
+ group.add_argument('--compact', action='store_true',
50
+ help='suppress all whitespace separation (most compact)')
51
+ options = parser.parse_args()
52
+
53
+ dump_args = {
54
+ 'sort_keys': options.sort_keys,
55
+ 'indent': options.indent,
56
+ 'ensure_ascii': options.ensure_ascii,
57
+ }
58
+ if options.compact:
59
+ dump_args['indent'] = None
60
+ dump_args['separators'] = ',', ':'
61
+
62
+ with options.infile as infile:
63
+ try:
64
+ if options.json_lines:
65
+ objs = (json.loads(line) for line in infile)
66
+ else:
67
+ objs = (json.load(infile),)
68
+
69
+ if options.outfile is None:
70
+ out = sys.stdout
71
+ else:
72
+ out = options.outfile.open('w', encoding='utf-8')
73
+ with out as outfile:
74
+ for obj in objs:
75
+ json.dump(obj, outfile, **dump_args)
76
+ outfile.write('\n')
77
+ except ValueError as e:
78
+ raise SystemExit(e)
79
+
80
+
81
+ if __name__ == '__main__':
82
+ try:
83
+ main()
84
+ except BrokenPipeError as exc:
85
+ sys.exit(exc.errno)
llava/lib/python3.10/multiprocessing/__init__.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Package analogous to 'threading.py' but using processes
3
+ #
4
+ # multiprocessing/__init__.py
5
+ #
6
+ # This package is intended to duplicate the functionality (and much of
7
+ # the API) of threading.py but uses processes instead of threads. A
8
+ # subpackage 'multiprocessing.dummy' has the same API but is a simple
9
+ # wrapper for 'threading'.
10
+ #
11
+ # Copyright (c) 2006-2008, R Oudkerk
12
+ # Licensed to PSF under a Contributor Agreement.
13
+ #
14
+
15
+ import sys
16
+ from . import context
17
+
18
+ #
19
+ # Copy stuff from default context
20
+ #
21
+
22
+ __all__ = [x for x in dir(context._default_context) if not x.startswith('_')]
23
+ globals().update((name, getattr(context._default_context, name)) for name in __all__)
24
+
25
+ #
26
+ # XXX These should not really be documented or public.
27
+ #
28
+
29
+ SUBDEBUG = 5
30
+ SUBWARNING = 25
31
+
32
+ #
33
+ # Alias for main module -- will be reset by bootstrapping child processes
34
+ #
35
+
36
+ if '__main__' in sys.modules:
37
+ sys.modules['__mp_main__'] = sys.modules['__main__']
llava/lib/python3.10/multiprocessing/connection.py ADDED
@@ -0,0 +1,973 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # A higher level module for using sockets (or Windows named pipes)
3
+ #
4
+ # multiprocessing/connection.py
5
+ #
6
+ # Copyright (c) 2006-2008, R Oudkerk
7
+ # Licensed to PSF under a Contributor Agreement.
8
+ #
9
+
10
+ __all__ = [ 'Client', 'Listener', 'Pipe', 'wait' ]
11
+
12
+ import io
13
+ import os
14
+ import sys
15
+ import socket
16
+ import struct
17
+ import time
18
+ import tempfile
19
+ import itertools
20
+
21
+ import _multiprocessing
22
+
23
+ from . import util
24
+
25
+ from . import AuthenticationError, BufferTooShort
26
+ from .context import reduction
27
+ _ForkingPickler = reduction.ForkingPickler
28
+
29
+ try:
30
+ import _winapi
31
+ from _winapi import WAIT_OBJECT_0, WAIT_ABANDONED_0, WAIT_TIMEOUT, INFINITE
32
+ except ImportError:
33
+ if sys.platform == 'win32':
34
+ raise
35
+ _winapi = None
36
+
37
+ #
38
+ #
39
+ #
40
+
41
+ BUFSIZE = 8192
42
+ # A very generous timeout when it comes to local connections...
43
+ CONNECTION_TIMEOUT = 20.
44
+
45
+ _mmap_counter = itertools.count()
46
+
47
+ default_family = 'AF_INET'
48
+ families = ['AF_INET']
49
+
50
+ if hasattr(socket, 'AF_UNIX'):
51
+ default_family = 'AF_UNIX'
52
+ families += ['AF_UNIX']
53
+
54
+ if sys.platform == 'win32':
55
+ default_family = 'AF_PIPE'
56
+ families += ['AF_PIPE']
57
+
58
+
59
+ def _init_timeout(timeout=CONNECTION_TIMEOUT):
60
+ return time.monotonic() + timeout
61
+
62
+ def _check_timeout(t):
63
+ return time.monotonic() > t
64
+
65
+ #
66
+ #
67
+ #
68
+
69
+ def arbitrary_address(family):
70
+ '''
71
+ Return an arbitrary free address for the given family
72
+ '''
73
+ if family == 'AF_INET':
74
+ return ('localhost', 0)
75
+ elif family == 'AF_UNIX':
76
+ return tempfile.mktemp(prefix='listener-', dir=util.get_temp_dir())
77
+ elif family == 'AF_PIPE':
78
+ return tempfile.mktemp(prefix=r'\\.\pipe\pyc-%d-%d-' %
79
+ (os.getpid(), next(_mmap_counter)), dir="")
80
+ else:
81
+ raise ValueError('unrecognized family')
82
+
83
+ def _validate_family(family):
84
+ '''
85
+ Checks if the family is valid for the current environment.
86
+ '''
87
+ if sys.platform != 'win32' and family == 'AF_PIPE':
88
+ raise ValueError('Family %s is not recognized.' % family)
89
+
90
+ if sys.platform == 'win32' and family == 'AF_UNIX':
91
+ # double check
92
+ if not hasattr(socket, family):
93
+ raise ValueError('Family %s is not recognized.' % family)
94
+
95
+ def address_type(address):
96
+ '''
97
+ Return the types of the address
98
+
99
+ This can be 'AF_INET', 'AF_UNIX', or 'AF_PIPE'
100
+ '''
101
+ if type(address) == tuple:
102
+ return 'AF_INET'
103
+ elif type(address) is str and address.startswith('\\\\'):
104
+ return 'AF_PIPE'
105
+ elif type(address) is str or util.is_abstract_socket_namespace(address):
106
+ return 'AF_UNIX'
107
+ else:
108
+ raise ValueError('address type of %r unrecognized' % address)
109
+
110
+ #
111
+ # Connection classes
112
+ #
113
+
114
+ class _ConnectionBase:
115
+ _handle = None
116
+
117
+ def __init__(self, handle, readable=True, writable=True):
118
+ handle = handle.__index__()
119
+ if handle < 0:
120
+ raise ValueError("invalid handle")
121
+ if not readable and not writable:
122
+ raise ValueError(
123
+ "at least one of `readable` and `writable` must be True")
124
+ self._handle = handle
125
+ self._readable = readable
126
+ self._writable = writable
127
+
128
+ # XXX should we use util.Finalize instead of a __del__?
129
+
130
+ def __del__(self):
131
+ if self._handle is not None:
132
+ self._close()
133
+
134
+ def _check_closed(self):
135
+ if self._handle is None:
136
+ raise OSError("handle is closed")
137
+
138
+ def _check_readable(self):
139
+ if not self._readable:
140
+ raise OSError("connection is write-only")
141
+
142
+ def _check_writable(self):
143
+ if not self._writable:
144
+ raise OSError("connection is read-only")
145
+
146
+ def _bad_message_length(self):
147
+ if self._writable:
148
+ self._readable = False
149
+ else:
150
+ self.close()
151
+ raise OSError("bad message length")
152
+
153
+ @property
154
+ def closed(self):
155
+ """True if the connection is closed"""
156
+ return self._handle is None
157
+
158
+ @property
159
+ def readable(self):
160
+ """True if the connection is readable"""
161
+ return self._readable
162
+
163
+ @property
164
+ def writable(self):
165
+ """True if the connection is writable"""
166
+ return self._writable
167
+
168
+ def fileno(self):
169
+ """File descriptor or handle of the connection"""
170
+ self._check_closed()
171
+ return self._handle
172
+
173
+ def close(self):
174
+ """Close the connection"""
175
+ if self._handle is not None:
176
+ try:
177
+ self._close()
178
+ finally:
179
+ self._handle = None
180
+
181
+ def send_bytes(self, buf, offset=0, size=None):
182
+ """Send the bytes data from a bytes-like object"""
183
+ self._check_closed()
184
+ self._check_writable()
185
+ m = memoryview(buf)
186
+ # HACK for byte-indexing of non-bytewise buffers (e.g. array.array)
187
+ if m.itemsize > 1:
188
+ m = memoryview(bytes(m))
189
+ n = len(m)
190
+ if offset < 0:
191
+ raise ValueError("offset is negative")
192
+ if n < offset:
193
+ raise ValueError("buffer length < offset")
194
+ if size is None:
195
+ size = n - offset
196
+ elif size < 0:
197
+ raise ValueError("size is negative")
198
+ elif offset + size > n:
199
+ raise ValueError("buffer length < offset + size")
200
+ self._send_bytes(m[offset:offset + size])
201
+
202
+ def send(self, obj):
203
+ """Send a (picklable) object"""
204
+ self._check_closed()
205
+ self._check_writable()
206
+ self._send_bytes(_ForkingPickler.dumps(obj))
207
+
208
+ def recv_bytes(self, maxlength=None):
209
+ """
210
+ Receive bytes data as a bytes object.
211
+ """
212
+ self._check_closed()
213
+ self._check_readable()
214
+ if maxlength is not None and maxlength < 0:
215
+ raise ValueError("negative maxlength")
216
+ buf = self._recv_bytes(maxlength)
217
+ if buf is None:
218
+ self._bad_message_length()
219
+ return buf.getvalue()
220
+
221
+ def recv_bytes_into(self, buf, offset=0):
222
+ """
223
+ Receive bytes data into a writeable bytes-like object.
224
+ Return the number of bytes read.
225
+ """
226
+ self._check_closed()
227
+ self._check_readable()
228
+ with memoryview(buf) as m:
229
+ # Get bytesize of arbitrary buffer
230
+ itemsize = m.itemsize
231
+ bytesize = itemsize * len(m)
232
+ if offset < 0:
233
+ raise ValueError("negative offset")
234
+ elif offset > bytesize:
235
+ raise ValueError("offset too large")
236
+ result = self._recv_bytes()
237
+ size = result.tell()
238
+ if bytesize < offset + size:
239
+ raise BufferTooShort(result.getvalue())
240
+ # Message can fit in dest
241
+ result.seek(0)
242
+ result.readinto(m[offset // itemsize :
243
+ (offset + size) // itemsize])
244
+ return size
245
+
246
+ def recv(self):
247
+ """Receive a (picklable) object"""
248
+ self._check_closed()
249
+ self._check_readable()
250
+ buf = self._recv_bytes()
251
+ return _ForkingPickler.loads(buf.getbuffer())
252
+
253
+ def poll(self, timeout=0.0):
254
+ """Whether there is any input available to be read"""
255
+ self._check_closed()
256
+ self._check_readable()
257
+ return self._poll(timeout)
258
+
259
+ def __enter__(self):
260
+ return self
261
+
262
+ def __exit__(self, exc_type, exc_value, exc_tb):
263
+ self.close()
264
+
265
+
266
+ if _winapi:
267
+
268
+ class PipeConnection(_ConnectionBase):
269
+ """
270
+ Connection class based on a Windows named pipe.
271
+ Overlapped I/O is used, so the handles must have been created
272
+ with FILE_FLAG_OVERLAPPED.
273
+ """
274
+ _got_empty_message = False
275
+
276
+ def _close(self, _CloseHandle=_winapi.CloseHandle):
277
+ _CloseHandle(self._handle)
278
+
279
+ def _send_bytes(self, buf):
280
+ ov, err = _winapi.WriteFile(self._handle, buf, overlapped=True)
281
+ try:
282
+ if err == _winapi.ERROR_IO_PENDING:
283
+ waitres = _winapi.WaitForMultipleObjects(
284
+ [ov.event], False, INFINITE)
285
+ assert waitres == WAIT_OBJECT_0
286
+ except:
287
+ ov.cancel()
288
+ raise
289
+ finally:
290
+ nwritten, err = ov.GetOverlappedResult(True)
291
+ assert err == 0
292
+ assert nwritten == len(buf)
293
+
294
+ def _recv_bytes(self, maxsize=None):
295
+ if self._got_empty_message:
296
+ self._got_empty_message = False
297
+ return io.BytesIO()
298
+ else:
299
+ bsize = 128 if maxsize is None else min(maxsize, 128)
300
+ try:
301
+ ov, err = _winapi.ReadFile(self._handle, bsize,
302
+ overlapped=True)
303
+ try:
304
+ if err == _winapi.ERROR_IO_PENDING:
305
+ waitres = _winapi.WaitForMultipleObjects(
306
+ [ov.event], False, INFINITE)
307
+ assert waitres == WAIT_OBJECT_0
308
+ except:
309
+ ov.cancel()
310
+ raise
311
+ finally:
312
+ nread, err = ov.GetOverlappedResult(True)
313
+ if err == 0:
314
+ f = io.BytesIO()
315
+ f.write(ov.getbuffer())
316
+ return f
317
+ elif err == _winapi.ERROR_MORE_DATA:
318
+ return self._get_more_data(ov, maxsize)
319
+ except OSError as e:
320
+ if e.winerror == _winapi.ERROR_BROKEN_PIPE:
321
+ raise EOFError
322
+ else:
323
+ raise
324
+ raise RuntimeError("shouldn't get here; expected KeyboardInterrupt")
325
+
326
+ def _poll(self, timeout):
327
+ if (self._got_empty_message or
328
+ _winapi.PeekNamedPipe(self._handle)[0] != 0):
329
+ return True
330
+ return bool(wait([self], timeout))
331
+
332
+ def _get_more_data(self, ov, maxsize):
333
+ buf = ov.getbuffer()
334
+ f = io.BytesIO()
335
+ f.write(buf)
336
+ left = _winapi.PeekNamedPipe(self._handle)[1]
337
+ assert left > 0
338
+ if maxsize is not None and len(buf) + left > maxsize:
339
+ self._bad_message_length()
340
+ ov, err = _winapi.ReadFile(self._handle, left, overlapped=True)
341
+ rbytes, err = ov.GetOverlappedResult(True)
342
+ assert err == 0
343
+ assert rbytes == left
344
+ f.write(ov.getbuffer())
345
+ return f
346
+
347
+
348
+ class Connection(_ConnectionBase):
349
+ """
350
+ Connection class based on an arbitrary file descriptor (Unix only), or
351
+ a socket handle (Windows).
352
+ """
353
+
354
+ if _winapi:
355
+ def _close(self, _close=_multiprocessing.closesocket):
356
+ _close(self._handle)
357
+ _write = _multiprocessing.send
358
+ _read = _multiprocessing.recv
359
+ else:
360
+ def _close(self, _close=os.close):
361
+ _close(self._handle)
362
+ _write = os.write
363
+ _read = os.read
364
+
365
+ def _send(self, buf, write=_write):
366
+ remaining = len(buf)
367
+ while True:
368
+ n = write(self._handle, buf)
369
+ remaining -= n
370
+ if remaining == 0:
371
+ break
372
+ buf = buf[n:]
373
+
374
+ def _recv(self, size, read=_read):
375
+ buf = io.BytesIO()
376
+ handle = self._handle
377
+ remaining = size
378
+ while remaining > 0:
379
+ chunk = read(handle, remaining)
380
+ n = len(chunk)
381
+ if n == 0:
382
+ if remaining == size:
383
+ raise EOFError
384
+ else:
385
+ raise OSError("got end of file during message")
386
+ buf.write(chunk)
387
+ remaining -= n
388
+ return buf
389
+
390
+ def _send_bytes(self, buf):
391
+ n = len(buf)
392
+ if n > 0x7fffffff:
393
+ pre_header = struct.pack("!i", -1)
394
+ header = struct.pack("!Q", n)
395
+ self._send(pre_header)
396
+ self._send(header)
397
+ self._send(buf)
398
+ else:
399
+ # For wire compatibility with 3.7 and lower
400
+ header = struct.pack("!i", n)
401
+ if n > 16384:
402
+ # The payload is large so Nagle's algorithm won't be triggered
403
+ # and we'd better avoid the cost of concatenation.
404
+ self._send(header)
405
+ self._send(buf)
406
+ else:
407
+ # Issue #20540: concatenate before sending, to avoid delays due
408
+ # to Nagle's algorithm on a TCP socket.
409
+ # Also note we want to avoid sending a 0-length buffer separately,
410
+ # to avoid "broken pipe" errors if the other end closed the pipe.
411
+ self._send(header + buf)
412
+
413
+ def _recv_bytes(self, maxsize=None):
414
+ buf = self._recv(4)
415
+ size, = struct.unpack("!i", buf.getvalue())
416
+ if size == -1:
417
+ buf = self._recv(8)
418
+ size, = struct.unpack("!Q", buf.getvalue())
419
+ if maxsize is not None and size > maxsize:
420
+ return None
421
+ return self._recv(size)
422
+
423
+ def _poll(self, timeout):
424
+ r = wait([self], timeout)
425
+ return bool(r)
426
+
427
+
428
+ #
429
+ # Public functions
430
+ #
431
+
432
+ class Listener(object):
433
+ '''
434
+ Returns a listener object.
435
+
436
+ This is a wrapper for a bound socket which is 'listening' for
437
+ connections, or for a Windows named pipe.
438
+ '''
439
+ def __init__(self, address=None, family=None, backlog=1, authkey=None):
440
+ family = family or (address and address_type(address)) \
441
+ or default_family
442
+ address = address or arbitrary_address(family)
443
+
444
+ _validate_family(family)
445
+ if family == 'AF_PIPE':
446
+ self._listener = PipeListener(address, backlog)
447
+ else:
448
+ self._listener = SocketListener(address, family, backlog)
449
+
450
+ if authkey is not None and not isinstance(authkey, bytes):
451
+ raise TypeError('authkey should be a byte string')
452
+
453
+ self._authkey = authkey
454
+
455
+ def accept(self):
456
+ '''
457
+ Accept a connection on the bound socket or named pipe of `self`.
458
+
459
+ Returns a `Connection` object.
460
+ '''
461
+ if self._listener is None:
462
+ raise OSError('listener is closed')
463
+ c = self._listener.accept()
464
+ if self._authkey:
465
+ deliver_challenge(c, self._authkey)
466
+ answer_challenge(c, self._authkey)
467
+ return c
468
+
469
+ def close(self):
470
+ '''
471
+ Close the bound socket or named pipe of `self`.
472
+ '''
473
+ listener = self._listener
474
+ if listener is not None:
475
+ self._listener = None
476
+ listener.close()
477
+
478
+ @property
479
+ def address(self):
480
+ return self._listener._address
481
+
482
+ @property
483
+ def last_accepted(self):
484
+ return self._listener._last_accepted
485
+
486
+ def __enter__(self):
487
+ return self
488
+
489
+ def __exit__(self, exc_type, exc_value, exc_tb):
490
+ self.close()
491
+
492
+
493
+ def Client(address, family=None, authkey=None):
494
+ '''
495
+ Returns a connection to the address of a `Listener`
496
+ '''
497
+ family = family or address_type(address)
498
+ _validate_family(family)
499
+ if family == 'AF_PIPE':
500
+ c = PipeClient(address)
501
+ else:
502
+ c = SocketClient(address)
503
+
504
+ if authkey is not None and not isinstance(authkey, bytes):
505
+ raise TypeError('authkey should be a byte string')
506
+
507
+ if authkey is not None:
508
+ answer_challenge(c, authkey)
509
+ deliver_challenge(c, authkey)
510
+
511
+ return c
512
+
513
+
514
+ if sys.platform != 'win32':
515
+
516
+ def Pipe(duplex=True):
517
+ '''
518
+ Returns pair of connection objects at either end of a pipe
519
+ '''
520
+ if duplex:
521
+ s1, s2 = socket.socketpair()
522
+ s1.setblocking(True)
523
+ s2.setblocking(True)
524
+ c1 = Connection(s1.detach())
525
+ c2 = Connection(s2.detach())
526
+ else:
527
+ fd1, fd2 = os.pipe()
528
+ c1 = Connection(fd1, writable=False)
529
+ c2 = Connection(fd2, readable=False)
530
+
531
+ return c1, c2
532
+
533
+ else:
534
+
535
+ def Pipe(duplex=True):
536
+ '''
537
+ Returns pair of connection objects at either end of a pipe
538
+ '''
539
+ address = arbitrary_address('AF_PIPE')
540
+ if duplex:
541
+ openmode = _winapi.PIPE_ACCESS_DUPLEX
542
+ access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE
543
+ obsize, ibsize = BUFSIZE, BUFSIZE
544
+ else:
545
+ openmode = _winapi.PIPE_ACCESS_INBOUND
546
+ access = _winapi.GENERIC_WRITE
547
+ obsize, ibsize = 0, BUFSIZE
548
+
549
+ h1 = _winapi.CreateNamedPipe(
550
+ address, openmode | _winapi.FILE_FLAG_OVERLAPPED |
551
+ _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE,
552
+ _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE |
553
+ _winapi.PIPE_WAIT,
554
+ 1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER,
555
+ # default security descriptor: the handle cannot be inherited
556
+ _winapi.NULL
557
+ )
558
+ h2 = _winapi.CreateFile(
559
+ address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING,
560
+ _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL
561
+ )
562
+ _winapi.SetNamedPipeHandleState(
563
+ h2, _winapi.PIPE_READMODE_MESSAGE, None, None
564
+ )
565
+
566
+ overlapped = _winapi.ConnectNamedPipe(h1, overlapped=True)
567
+ _, err = overlapped.GetOverlappedResult(True)
568
+ assert err == 0
569
+
570
+ c1 = PipeConnection(h1, writable=duplex)
571
+ c2 = PipeConnection(h2, readable=duplex)
572
+
573
+ return c1, c2
574
+
575
+ #
576
+ # Definitions for connections based on sockets
577
+ #
578
+
579
+ class SocketListener(object):
580
+ '''
581
+ Representation of a socket which is bound to an address and listening
582
+ '''
583
+ def __init__(self, address, family, backlog=1):
584
+ self._socket = socket.socket(getattr(socket, family))
585
+ try:
586
+ # SO_REUSEADDR has different semantics on Windows (issue #2550).
587
+ if os.name == 'posix':
588
+ self._socket.setsockopt(socket.SOL_SOCKET,
589
+ socket.SO_REUSEADDR, 1)
590
+ self._socket.setblocking(True)
591
+ self._socket.bind(address)
592
+ self._socket.listen(backlog)
593
+ self._address = self._socket.getsockname()
594
+ except OSError:
595
+ self._socket.close()
596
+ raise
597
+ self._family = family
598
+ self._last_accepted = None
599
+
600
+ if family == 'AF_UNIX' and not util.is_abstract_socket_namespace(address):
601
+ # Linux abstract socket namespaces do not need to be explicitly unlinked
602
+ self._unlink = util.Finalize(
603
+ self, os.unlink, args=(address,), exitpriority=0
604
+ )
605
+ else:
606
+ self._unlink = None
607
+
608
+ def accept(self):
609
+ s, self._last_accepted = self._socket.accept()
610
+ s.setblocking(True)
611
+ return Connection(s.detach())
612
+
613
+ def close(self):
614
+ try:
615
+ self._socket.close()
616
+ finally:
617
+ unlink = self._unlink
618
+ if unlink is not None:
619
+ self._unlink = None
620
+ unlink()
621
+
622
+
623
+ def SocketClient(address):
624
+ '''
625
+ Return a connection object connected to the socket given by `address`
626
+ '''
627
+ family = address_type(address)
628
+ with socket.socket( getattr(socket, family) ) as s:
629
+ s.setblocking(True)
630
+ s.connect(address)
631
+ return Connection(s.detach())
632
+
633
+ #
634
+ # Definitions for connections based on named pipes
635
+ #
636
+
637
+ if sys.platform == 'win32':
638
+
639
+ class PipeListener(object):
640
+ '''
641
+ Representation of a named pipe
642
+ '''
643
+ def __init__(self, address, backlog=None):
644
+ self._address = address
645
+ self._handle_queue = [self._new_handle(first=True)]
646
+
647
+ self._last_accepted = None
648
+ util.sub_debug('listener created with address=%r', self._address)
649
+ self.close = util.Finalize(
650
+ self, PipeListener._finalize_pipe_listener,
651
+ args=(self._handle_queue, self._address), exitpriority=0
652
+ )
653
+
654
+ def _new_handle(self, first=False):
655
+ flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED
656
+ if first:
657
+ flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE
658
+ return _winapi.CreateNamedPipe(
659
+ self._address, flags,
660
+ _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE |
661
+ _winapi.PIPE_WAIT,
662
+ _winapi.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE,
663
+ _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL
664
+ )
665
+
666
+ def accept(self):
667
+ self._handle_queue.append(self._new_handle())
668
+ handle = self._handle_queue.pop(0)
669
+ try:
670
+ ov = _winapi.ConnectNamedPipe(handle, overlapped=True)
671
+ except OSError as e:
672
+ if e.winerror != _winapi.ERROR_NO_DATA:
673
+ raise
674
+ # ERROR_NO_DATA can occur if a client has already connected,
675
+ # written data and then disconnected -- see Issue 14725.
676
+ else:
677
+ try:
678
+ res = _winapi.WaitForMultipleObjects(
679
+ [ov.event], False, INFINITE)
680
+ except:
681
+ ov.cancel()
682
+ _winapi.CloseHandle(handle)
683
+ raise
684
+ finally:
685
+ _, err = ov.GetOverlappedResult(True)
686
+ assert err == 0
687
+ return PipeConnection(handle)
688
+
689
+ @staticmethod
690
+ def _finalize_pipe_listener(queue, address):
691
+ util.sub_debug('closing listener with address=%r', address)
692
+ for handle in queue:
693
+ _winapi.CloseHandle(handle)
694
+
695
+ def PipeClient(address):
696
+ '''
697
+ Return a connection object connected to the pipe given by `address`
698
+ '''
699
+ t = _init_timeout()
700
+ while 1:
701
+ try:
702
+ _winapi.WaitNamedPipe(address, 1000)
703
+ h = _winapi.CreateFile(
704
+ address, _winapi.GENERIC_READ | _winapi.GENERIC_WRITE,
705
+ 0, _winapi.NULL, _winapi.OPEN_EXISTING,
706
+ _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL
707
+ )
708
+ except OSError as e:
709
+ if e.winerror not in (_winapi.ERROR_SEM_TIMEOUT,
710
+ _winapi.ERROR_PIPE_BUSY) or _check_timeout(t):
711
+ raise
712
+ else:
713
+ break
714
+ else:
715
+ raise
716
+
717
+ _winapi.SetNamedPipeHandleState(
718
+ h, _winapi.PIPE_READMODE_MESSAGE, None, None
719
+ )
720
+ return PipeConnection(h)
721
+
722
+ #
723
+ # Authentication stuff
724
+ #
725
+
726
+ MESSAGE_LENGTH = 20
727
+
728
+ CHALLENGE = b'#CHALLENGE#'
729
+ WELCOME = b'#WELCOME#'
730
+ FAILURE = b'#FAILURE#'
731
+
732
+ def deliver_challenge(connection, authkey):
733
+ import hmac
734
+ if not isinstance(authkey, bytes):
735
+ raise ValueError(
736
+ "Authkey must be bytes, not {0!s}".format(type(authkey)))
737
+ message = os.urandom(MESSAGE_LENGTH)
738
+ connection.send_bytes(CHALLENGE + message)
739
+ digest = hmac.new(authkey, message, 'md5').digest()
740
+ response = connection.recv_bytes(256) # reject large message
741
+ if response == digest:
742
+ connection.send_bytes(WELCOME)
743
+ else:
744
+ connection.send_bytes(FAILURE)
745
+ raise AuthenticationError('digest received was wrong')
746
+
747
+ def answer_challenge(connection, authkey):
748
+ import hmac
749
+ if not isinstance(authkey, bytes):
750
+ raise ValueError(
751
+ "Authkey must be bytes, not {0!s}".format(type(authkey)))
752
+ message = connection.recv_bytes(256) # reject large message
753
+ assert message[:len(CHALLENGE)] == CHALLENGE, 'message = %r' % message
754
+ message = message[len(CHALLENGE):]
755
+ digest = hmac.new(authkey, message, 'md5').digest()
756
+ connection.send_bytes(digest)
757
+ response = connection.recv_bytes(256) # reject large message
758
+ if response != WELCOME:
759
+ raise AuthenticationError('digest sent was rejected')
760
+
761
+ #
762
+ # Support for using xmlrpclib for serialization
763
+ #
764
+
765
+ class ConnectionWrapper(object):
766
+ def __init__(self, conn, dumps, loads):
767
+ self._conn = conn
768
+ self._dumps = dumps
769
+ self._loads = loads
770
+ for attr in ('fileno', 'close', 'poll', 'recv_bytes', 'send_bytes'):
771
+ obj = getattr(conn, attr)
772
+ setattr(self, attr, obj)
773
+ def send(self, obj):
774
+ s = self._dumps(obj)
775
+ self._conn.send_bytes(s)
776
+ def recv(self):
777
+ s = self._conn.recv_bytes()
778
+ return self._loads(s)
779
+
780
+ def _xml_dumps(obj):
781
+ return xmlrpclib.dumps((obj,), None, None, None, 1).encode('utf-8')
782
+
783
+ def _xml_loads(s):
784
+ (obj,), method = xmlrpclib.loads(s.decode('utf-8'))
785
+ return obj
786
+
787
+ class XmlListener(Listener):
788
+ def accept(self):
789
+ global xmlrpclib
790
+ import xmlrpc.client as xmlrpclib
791
+ obj = Listener.accept(self)
792
+ return ConnectionWrapper(obj, _xml_dumps, _xml_loads)
793
+
794
+ def XmlClient(*args, **kwds):
795
+ global xmlrpclib
796
+ import xmlrpc.client as xmlrpclib
797
+ return ConnectionWrapper(Client(*args, **kwds), _xml_dumps, _xml_loads)
798
+
799
+ #
800
+ # Wait
801
+ #
802
+
803
+ if sys.platform == 'win32':
804
+
805
+ def _exhaustive_wait(handles, timeout):
806
+ # Return ALL handles which are currently signalled. (Only
807
+ # returning the first signalled might create starvation issues.)
808
+ L = list(handles)
809
+ ready = []
810
+ while L:
811
+ res = _winapi.WaitForMultipleObjects(L, False, timeout)
812
+ if res == WAIT_TIMEOUT:
813
+ break
814
+ elif WAIT_OBJECT_0 <= res < WAIT_OBJECT_0 + len(L):
815
+ res -= WAIT_OBJECT_0
816
+ elif WAIT_ABANDONED_0 <= res < WAIT_ABANDONED_0 + len(L):
817
+ res -= WAIT_ABANDONED_0
818
+ else:
819
+ raise RuntimeError('Should not get here')
820
+ ready.append(L[res])
821
+ L = L[res+1:]
822
+ timeout = 0
823
+ return ready
824
+
825
+ _ready_errors = {_winapi.ERROR_BROKEN_PIPE, _winapi.ERROR_NETNAME_DELETED}
826
+
827
+ def wait(object_list, timeout=None):
828
+ '''
829
+ Wait till an object in object_list is ready/readable.
830
+
831
+ Returns list of those objects in object_list which are ready/readable.
832
+ '''
833
+ if timeout is None:
834
+ timeout = INFINITE
835
+ elif timeout < 0:
836
+ timeout = 0
837
+ else:
838
+ timeout = int(timeout * 1000 + 0.5)
839
+
840
+ object_list = list(object_list)
841
+ waithandle_to_obj = {}
842
+ ov_list = []
843
+ ready_objects = set()
844
+ ready_handles = set()
845
+
846
+ try:
847
+ for o in object_list:
848
+ try:
849
+ fileno = getattr(o, 'fileno')
850
+ except AttributeError:
851
+ waithandle_to_obj[o.__index__()] = o
852
+ else:
853
+ # start an overlapped read of length zero
854
+ try:
855
+ ov, err = _winapi.ReadFile(fileno(), 0, True)
856
+ except OSError as e:
857
+ ov, err = None, e.winerror
858
+ if err not in _ready_errors:
859
+ raise
860
+ if err == _winapi.ERROR_IO_PENDING:
861
+ ov_list.append(ov)
862
+ waithandle_to_obj[ov.event] = o
863
+ else:
864
+ # If o.fileno() is an overlapped pipe handle and
865
+ # err == 0 then there is a zero length message
866
+ # in the pipe, but it HAS NOT been consumed...
867
+ if ov and sys.getwindowsversion()[:2] >= (6, 2):
868
+ # ... except on Windows 8 and later, where
869
+ # the message HAS been consumed.
870
+ try:
871
+ _, err = ov.GetOverlappedResult(False)
872
+ except OSError as e:
873
+ err = e.winerror
874
+ if not err and hasattr(o, '_got_empty_message'):
875
+ o._got_empty_message = True
876
+ ready_objects.add(o)
877
+ timeout = 0
878
+
879
+ ready_handles = _exhaustive_wait(waithandle_to_obj.keys(), timeout)
880
+ finally:
881
+ # request that overlapped reads stop
882
+ for ov in ov_list:
883
+ ov.cancel()
884
+
885
+ # wait for all overlapped reads to stop
886
+ for ov in ov_list:
887
+ try:
888
+ _, err = ov.GetOverlappedResult(True)
889
+ except OSError as e:
890
+ err = e.winerror
891
+ if err not in _ready_errors:
892
+ raise
893
+ if err != _winapi.ERROR_OPERATION_ABORTED:
894
+ o = waithandle_to_obj[ov.event]
895
+ ready_objects.add(o)
896
+ if err == 0:
897
+ # If o.fileno() is an overlapped pipe handle then
898
+ # a zero length message HAS been consumed.
899
+ if hasattr(o, '_got_empty_message'):
900
+ o._got_empty_message = True
901
+
902
+ ready_objects.update(waithandle_to_obj[h] for h in ready_handles)
903
+ return [o for o in object_list if o in ready_objects]
904
+
905
+ else:
906
+
907
+ import selectors
908
+
909
+ # poll/select have the advantage of not requiring any extra file
910
+ # descriptor, contrarily to epoll/kqueue (also, they require a single
911
+ # syscall).
912
+ if hasattr(selectors, 'PollSelector'):
913
+ _WaitSelector = selectors.PollSelector
914
+ else:
915
+ _WaitSelector = selectors.SelectSelector
916
+
917
+ def wait(object_list, timeout=None):
918
+ '''
919
+ Wait till an object in object_list is ready/readable.
920
+
921
+ Returns list of those objects in object_list which are ready/readable.
922
+ '''
923
+ with _WaitSelector() as selector:
924
+ for obj in object_list:
925
+ selector.register(obj, selectors.EVENT_READ)
926
+
927
+ if timeout is not None:
928
+ deadline = time.monotonic() + timeout
929
+
930
+ while True:
931
+ ready = selector.select(timeout)
932
+ if ready:
933
+ return [key.fileobj for (key, events) in ready]
934
+ else:
935
+ if timeout is not None:
936
+ timeout = deadline - time.monotonic()
937
+ if timeout < 0:
938
+ return ready
939
+
940
+ #
941
+ # Make connection and socket objects sharable if possible
942
+ #
943
+
944
+ if sys.platform == 'win32':
945
+ def reduce_connection(conn):
946
+ handle = conn.fileno()
947
+ with socket.fromfd(handle, socket.AF_INET, socket.SOCK_STREAM) as s:
948
+ from . import resource_sharer
949
+ ds = resource_sharer.DupSocket(s)
950
+ return rebuild_connection, (ds, conn.readable, conn.writable)
951
+ def rebuild_connection(ds, readable, writable):
952
+ sock = ds.detach()
953
+ return Connection(sock.detach(), readable, writable)
954
+ reduction.register(Connection, reduce_connection)
955
+
956
+ def reduce_pipe_connection(conn):
957
+ access = ((_winapi.FILE_GENERIC_READ if conn.readable else 0) |
958
+ (_winapi.FILE_GENERIC_WRITE if conn.writable else 0))
959
+ dh = reduction.DupHandle(conn.fileno(), access)
960
+ return rebuild_pipe_connection, (dh, conn.readable, conn.writable)
961
+ def rebuild_pipe_connection(dh, readable, writable):
962
+ handle = dh.detach()
963
+ return PipeConnection(handle, readable, writable)
964
+ reduction.register(PipeConnection, reduce_pipe_connection)
965
+
966
+ else:
967
+ def reduce_connection(conn):
968
+ df = reduction.DupFd(conn.fileno())
969
+ return rebuild_connection, (df, conn.readable, conn.writable)
970
+ def rebuild_connection(df, readable, writable):
971
+ fd = df.detach()
972
+ return Connection(fd, readable, writable)
973
+ reduction.register(Connection, reduce_connection)
llava/lib/python3.10/multiprocessing/context.py ADDED
@@ -0,0 +1,376 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import sys
3
+ import threading
4
+
5
+ from . import process
6
+ from . import reduction
7
+
8
+ __all__ = ()
9
+
10
+ #
11
+ # Exceptions
12
+ #
13
+
14
+ class ProcessError(Exception):
15
+ pass
16
+
17
+ class BufferTooShort(ProcessError):
18
+ pass
19
+
20
+ class TimeoutError(ProcessError):
21
+ pass
22
+
23
+ class AuthenticationError(ProcessError):
24
+ pass
25
+
26
+ #
27
+ # Base type for contexts. Bound methods of an instance of this type are included in __all__ of __init__.py
28
+ #
29
+
30
+ class BaseContext(object):
31
+
32
+ ProcessError = ProcessError
33
+ BufferTooShort = BufferTooShort
34
+ TimeoutError = TimeoutError
35
+ AuthenticationError = AuthenticationError
36
+
37
+ current_process = staticmethod(process.current_process)
38
+ parent_process = staticmethod(process.parent_process)
39
+ active_children = staticmethod(process.active_children)
40
+
41
+ def cpu_count(self):
42
+ '''Returns the number of CPUs in the system'''
43
+ num = os.cpu_count()
44
+ if num is None:
45
+ raise NotImplementedError('cannot determine number of cpus')
46
+ else:
47
+ return num
48
+
49
+ def Manager(self):
50
+ '''Returns a manager associated with a running server process
51
+
52
+ The managers methods such as `Lock()`, `Condition()` and `Queue()`
53
+ can be used to create shared objects.
54
+ '''
55
+ from .managers import SyncManager
56
+ m = SyncManager(ctx=self.get_context())
57
+ m.start()
58
+ return m
59
+
60
+ def Pipe(self, duplex=True):
61
+ '''Returns two connection object connected by a pipe'''
62
+ from .connection import Pipe
63
+ return Pipe(duplex)
64
+
65
+ def Lock(self):
66
+ '''Returns a non-recursive lock object'''
67
+ from .synchronize import Lock
68
+ return Lock(ctx=self.get_context())
69
+
70
+ def RLock(self):
71
+ '''Returns a recursive lock object'''
72
+ from .synchronize import RLock
73
+ return RLock(ctx=self.get_context())
74
+
75
+ def Condition(self, lock=None):
76
+ '''Returns a condition object'''
77
+ from .synchronize import Condition
78
+ return Condition(lock, ctx=self.get_context())
79
+
80
+ def Semaphore(self, value=1):
81
+ '''Returns a semaphore object'''
82
+ from .synchronize import Semaphore
83
+ return Semaphore(value, ctx=self.get_context())
84
+
85
+ def BoundedSemaphore(self, value=1):
86
+ '''Returns a bounded semaphore object'''
87
+ from .synchronize import BoundedSemaphore
88
+ return BoundedSemaphore(value, ctx=self.get_context())
89
+
90
+ def Event(self):
91
+ '''Returns an event object'''
92
+ from .synchronize import Event
93
+ return Event(ctx=self.get_context())
94
+
95
+ def Barrier(self, parties, action=None, timeout=None):
96
+ '''Returns a barrier object'''
97
+ from .synchronize import Barrier
98
+ return Barrier(parties, action, timeout, ctx=self.get_context())
99
+
100
+ def Queue(self, maxsize=0):
101
+ '''Returns a queue object'''
102
+ from .queues import Queue
103
+ return Queue(maxsize, ctx=self.get_context())
104
+
105
+ def JoinableQueue(self, maxsize=0):
106
+ '''Returns a queue object'''
107
+ from .queues import JoinableQueue
108
+ return JoinableQueue(maxsize, ctx=self.get_context())
109
+
110
+ def SimpleQueue(self):
111
+ '''Returns a queue object'''
112
+ from .queues import SimpleQueue
113
+ return SimpleQueue(ctx=self.get_context())
114
+
115
+ def Pool(self, processes=None, initializer=None, initargs=(),
116
+ maxtasksperchild=None):
117
+ '''Returns a process pool object'''
118
+ from .pool import Pool
119
+ return Pool(processes, initializer, initargs, maxtasksperchild,
120
+ context=self.get_context())
121
+
122
+ def RawValue(self, typecode_or_type, *args):
123
+ '''Returns a shared object'''
124
+ from .sharedctypes import RawValue
125
+ return RawValue(typecode_or_type, *args)
126
+
127
+ def RawArray(self, typecode_or_type, size_or_initializer):
128
+ '''Returns a shared array'''
129
+ from .sharedctypes import RawArray
130
+ return RawArray(typecode_or_type, size_or_initializer)
131
+
132
+ def Value(self, typecode_or_type, *args, lock=True):
133
+ '''Returns a synchronized shared object'''
134
+ from .sharedctypes import Value
135
+ return Value(typecode_or_type, *args, lock=lock,
136
+ ctx=self.get_context())
137
+
138
+ def Array(self, typecode_or_type, size_or_initializer, *, lock=True):
139
+ '''Returns a synchronized shared array'''
140
+ from .sharedctypes import Array
141
+ return Array(typecode_or_type, size_or_initializer, lock=lock,
142
+ ctx=self.get_context())
143
+
144
+ def freeze_support(self):
145
+ '''Check whether this is a fake forked process in a frozen executable.
146
+ If so then run code specified by commandline and exit.
147
+ '''
148
+ if sys.platform == 'win32' and getattr(sys, 'frozen', False):
149
+ from .spawn import freeze_support
150
+ freeze_support()
151
+
152
+ def get_logger(self):
153
+ '''Return package logger -- if it does not already exist then
154
+ it is created.
155
+ '''
156
+ from .util import get_logger
157
+ return get_logger()
158
+
159
+ def log_to_stderr(self, level=None):
160
+ '''Turn on logging and add a handler which prints to stderr'''
161
+ from .util import log_to_stderr
162
+ return log_to_stderr(level)
163
+
164
+ def allow_connection_pickling(self):
165
+ '''Install support for sending connections and sockets
166
+ between processes
167
+ '''
168
+ # This is undocumented. In previous versions of multiprocessing
169
+ # its only effect was to make socket objects inheritable on Windows.
170
+ from . import connection
171
+
172
+ def set_executable(self, executable):
173
+ '''Sets the path to a python.exe or pythonw.exe binary used to run
174
+ child processes instead of sys.executable when using the 'spawn'
175
+ start method. Useful for people embedding Python.
176
+ '''
177
+ from .spawn import set_executable
178
+ set_executable(executable)
179
+
180
+ def set_forkserver_preload(self, module_names):
181
+ '''Set list of module names to try to load in forkserver process.
182
+ This is really just a hint.
183
+ '''
184
+ from .forkserver import set_forkserver_preload
185
+ set_forkserver_preload(module_names)
186
+
187
+ def get_context(self, method=None):
188
+ if method is None:
189
+ return self
190
+ try:
191
+ ctx = _concrete_contexts[method]
192
+ except KeyError:
193
+ raise ValueError('cannot find context for %r' % method) from None
194
+ ctx._check_available()
195
+ return ctx
196
+
197
+ def get_start_method(self, allow_none=False):
198
+ return self._name
199
+
200
+ def set_start_method(self, method, force=False):
201
+ raise ValueError('cannot set start method of concrete context')
202
+
203
+ @property
204
+ def reducer(self):
205
+ '''Controls how objects will be reduced to a form that can be
206
+ shared with other processes.'''
207
+ return globals().get('reduction')
208
+
209
+ @reducer.setter
210
+ def reducer(self, reduction):
211
+ globals()['reduction'] = reduction
212
+
213
+ def _check_available(self):
214
+ pass
215
+
216
+ #
217
+ # Type of default context -- underlying context can be set at most once
218
+ #
219
+
220
+ class Process(process.BaseProcess):
221
+ _start_method = None
222
+ @staticmethod
223
+ def _Popen(process_obj):
224
+ return _default_context.get_context().Process._Popen(process_obj)
225
+
226
+ @staticmethod
227
+ def _after_fork():
228
+ return _default_context.get_context().Process._after_fork()
229
+
230
+ class DefaultContext(BaseContext):
231
+ Process = Process
232
+
233
+ def __init__(self, context):
234
+ self._default_context = context
235
+ self._actual_context = None
236
+
237
+ def get_context(self, method=None):
238
+ if method is None:
239
+ if self._actual_context is None:
240
+ self._actual_context = self._default_context
241
+ return self._actual_context
242
+ else:
243
+ return super().get_context(method)
244
+
245
+ def set_start_method(self, method, force=False):
246
+ if self._actual_context is not None and not force:
247
+ raise RuntimeError('context has already been set')
248
+ if method is None and force:
249
+ self._actual_context = None
250
+ return
251
+ self._actual_context = self.get_context(method)
252
+
253
+ def get_start_method(self, allow_none=False):
254
+ if self._actual_context is None:
255
+ if allow_none:
256
+ return None
257
+ self._actual_context = self._default_context
258
+ return self._actual_context._name
259
+
260
+ def get_all_start_methods(self):
261
+ if sys.platform == 'win32':
262
+ return ['spawn']
263
+ else:
264
+ methods = ['spawn', 'fork'] if sys.platform == 'darwin' else ['fork', 'spawn']
265
+ if reduction.HAVE_SEND_HANDLE:
266
+ methods.append('forkserver')
267
+ return methods
268
+
269
+
270
+ #
271
+ # Context types for fixed start method
272
+ #
273
+
274
+ if sys.platform != 'win32':
275
+
276
+ class ForkProcess(process.BaseProcess):
277
+ _start_method = 'fork'
278
+ @staticmethod
279
+ def _Popen(process_obj):
280
+ from .popen_fork import Popen
281
+ return Popen(process_obj)
282
+
283
+ class SpawnProcess(process.BaseProcess):
284
+ _start_method = 'spawn'
285
+ @staticmethod
286
+ def _Popen(process_obj):
287
+ from .popen_spawn_posix import Popen
288
+ return Popen(process_obj)
289
+
290
+ @staticmethod
291
+ def _after_fork():
292
+ # process is spawned, nothing to do
293
+ pass
294
+
295
+ class ForkServerProcess(process.BaseProcess):
296
+ _start_method = 'forkserver'
297
+ @staticmethod
298
+ def _Popen(process_obj):
299
+ from .popen_forkserver import Popen
300
+ return Popen(process_obj)
301
+
302
+ class ForkContext(BaseContext):
303
+ _name = 'fork'
304
+ Process = ForkProcess
305
+
306
+ class SpawnContext(BaseContext):
307
+ _name = 'spawn'
308
+ Process = SpawnProcess
309
+
310
+ class ForkServerContext(BaseContext):
311
+ _name = 'forkserver'
312
+ Process = ForkServerProcess
313
+ def _check_available(self):
314
+ if not reduction.HAVE_SEND_HANDLE:
315
+ raise ValueError('forkserver start method not available')
316
+
317
+ _concrete_contexts = {
318
+ 'fork': ForkContext(),
319
+ 'spawn': SpawnContext(),
320
+ 'forkserver': ForkServerContext(),
321
+ }
322
+ if sys.platform == 'darwin':
323
+ # bpo-33725: running arbitrary code after fork() is no longer reliable
324
+ # on macOS since macOS 10.14 (Mojave). Use spawn by default instead.
325
+ _default_context = DefaultContext(_concrete_contexts['spawn'])
326
+ else:
327
+ _default_context = DefaultContext(_concrete_contexts['fork'])
328
+
329
+ else:
330
+
331
+ class SpawnProcess(process.BaseProcess):
332
+ _start_method = 'spawn'
333
+ @staticmethod
334
+ def _Popen(process_obj):
335
+ from .popen_spawn_win32 import Popen
336
+ return Popen(process_obj)
337
+
338
+ @staticmethod
339
+ def _after_fork():
340
+ # process is spawned, nothing to do
341
+ pass
342
+
343
+ class SpawnContext(BaseContext):
344
+ _name = 'spawn'
345
+ Process = SpawnProcess
346
+
347
+ _concrete_contexts = {
348
+ 'spawn': SpawnContext(),
349
+ }
350
+ _default_context = DefaultContext(_concrete_contexts['spawn'])
351
+
352
+ #
353
+ # Force the start method
354
+ #
355
+
356
+ def _force_start_method(method):
357
+ _default_context._actual_context = _concrete_contexts[method]
358
+
359
+ #
360
+ # Check that the current thread is spawning a child process
361
+ #
362
+
363
+ _tls = threading.local()
364
+
365
+ def get_spawning_popen():
366
+ return getattr(_tls, 'spawning_popen', None)
367
+
368
+ def set_spawning_popen(popen):
369
+ _tls.spawning_popen = popen
370
+
371
+ def assert_spawning(obj):
372
+ if get_spawning_popen() is None:
373
+ raise RuntimeError(
374
+ '%s objects should only be shared between processes'
375
+ ' through inheritance' % type(obj).__name__
376
+ )
llava/lib/python3.10/multiprocessing/forkserver.py ADDED
@@ -0,0 +1,348 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import errno
2
+ import os
3
+ import selectors
4
+ import signal
5
+ import socket
6
+ import struct
7
+ import sys
8
+ import threading
9
+ import warnings
10
+
11
+ from . import connection
12
+ from . import process
13
+ from .context import reduction
14
+ from . import resource_tracker
15
+ from . import spawn
16
+ from . import util
17
+
18
+ __all__ = ['ensure_running', 'get_inherited_fds', 'connect_to_new_process',
19
+ 'set_forkserver_preload']
20
+
21
+ #
22
+ #
23
+ #
24
+
25
+ MAXFDS_TO_SEND = 256
26
+ SIGNED_STRUCT = struct.Struct('q') # large enough for pid_t
27
+
28
+ #
29
+ # Forkserver class
30
+ #
31
+
32
class ForkServer(object):
    '''State holder and client-side API for the fork server.

    One instance (the module-level ``_forkserver`` singleton) lives in every
    process; it lazily launches the fork-server process on first use and
    remembers how to reach it (socket address, "alive" pipe fd, pid).
    '''

    def __init__(self):
        self._forkserver_address = None    # AF_UNIX address of the server
        self._forkserver_alive_fd = None   # write end of the "alive" pipe
        self._forkserver_pid = None        # pid of the server process
        self._inherited_fds = None         # set in children by _serve_one()
        self._lock = threading.Lock()
        self._preload_modules = ['__main__']

    def _stop(self):
        # Method used by unit tests to stop the server
        with self._lock:
            self._stop_unlocked()

    def _stop_unlocked(self):
        '''Stop the server and reset state; caller must hold self._lock.'''
        if self._forkserver_pid is None:
            return

        # Closing the "alive" file descriptor asks the server to stop.
        os.close(self._forkserver_alive_fd)
        self._forkserver_alive_fd = None

        os.waitpid(self._forkserver_pid, 0)
        self._forkserver_pid = None

        # Abstract-namespace sockets leave nothing on the filesystem.
        if not util.is_abstract_socket_namespace(self._forkserver_address):
            os.unlink(self._forkserver_address)
        self._forkserver_address = None

    def set_forkserver_preload(self, modules_names):
        '''Set list of module names to try to load in forkserver process.

        Raises TypeError if any entry is not a str.
        '''
        # BUG FIX: validate the *incoming* list, not the previously stored
        # one (the original checked self._preload_modules, so bad input was
        # accepted silently).
        if not all(type(mod) is str for mod in modules_names):
            raise TypeError('module_names must be a list of strings')
        self._preload_modules = modules_names

    def get_inherited_fds(self):
        '''Return list of fds inherited from parent process.

        This returns None if the current process was not started by fork
        server.
        '''
        return self._inherited_fds

    def connect_to_new_process(self, fds):
        '''Request forkserver to create a child process.

        Returns a pair of fds (status_r, data_w).  The calling process can read
        the child process's pid and (eventually) its returncode from status_r.
        The calling process should write to data_w the pickled preparation and
        process data.
        '''
        self.ensure_running()
        # 4 = the two pipe ends plus the alive fd and resource-tracker fd
        # appended below.
        if len(fds) + 4 >= MAXFDS_TO_SEND:
            raise ValueError('too many fds')
        with socket.socket(socket.AF_UNIX) as client:
            client.connect(self._forkserver_address)
            parent_r, child_w = os.pipe()
            child_r, parent_w = os.pipe()
            allfds = [child_r, child_w, self._forkserver_alive_fd,
                      resource_tracker.getfd()]
            allfds += fds
            try:
                reduction.sendfds(client, allfds)
                return parent_r, parent_w
            except:
                # On failure give back both ends we would have returned.
                os.close(parent_r)
                os.close(parent_w)
                raise
            finally:
                # The child's ends are always closed in this process once
                # they have been sent (or sending failed).
                os.close(child_r)
                os.close(child_w)

    def ensure_running(self):
        '''Make sure that a fork server is running.

        This can be called from any process.  Note that usually a child
        process will just reuse the forkserver started by its parent, so
        ensure_running() will do nothing.
        '''
        with self._lock:
            resource_tracker.ensure_running()
            if self._forkserver_pid is not None:
                # forkserver was launched before, is it still running?
                pid, status = os.waitpid(self._forkserver_pid, os.WNOHANG)
                if not pid:
                    # still alive
                    return
                # dead, launch it again
                os.close(self._forkserver_alive_fd)
                self._forkserver_address = None
                self._forkserver_alive_fd = None
                self._forkserver_pid = None

            # Command run with `python -c` in the server process; filled in
            # with the listener fd, alive fd, preload list and prep data.
            cmd = ('from multiprocessing.forkserver import main; ' +
                   'main(%d, %d, %r, **%r)')

            if self._preload_modules:
                desired_keys = {'main_path', 'sys_path'}
                data = spawn.get_preparation_data('ignore')
                data = {x: y for x, y in data.items() if x in desired_keys}
            else:
                data = {}

            with socket.socket(socket.AF_UNIX) as listener:
                address = connection.arbitrary_address('AF_UNIX')
                listener.bind(address)
                if not util.is_abstract_socket_namespace(address):
                    # Restrict the socket file to the owning user.
                    os.chmod(address, 0o600)
                listener.listen()

                # all client processes own the write end of the "alive" pipe;
                # when they all terminate the read end becomes ready.
                alive_r, alive_w = os.pipe()
                try:
                    fds_to_pass = [listener.fileno(), alive_r]
                    cmd %= (listener.fileno(), alive_r, self._preload_modules,
                            data)
                    exe = spawn.get_executable()
                    args = [exe] + util._args_from_interpreter_flags()
                    args += ['-c', cmd]
                    pid = util.spawnv_passfds(exe, args, fds_to_pass)
                except:
                    os.close(alive_w)
                    raise
                finally:
                    # Our copy of the read end is never needed after spawning.
                    os.close(alive_r)
                self._forkserver_address = address
                self._forkserver_alive_fd = alive_w
                self._forkserver_pid = pid
162
+
163
+ #
164
+ #
165
+ #
166
+
167
def main(listener_fd, alive_r, preload, main_path=None, sys_path=None):
    '''Run forkserver.'''
    # NOTE(review): sys_path is accepted but never referenced in this body —
    # presumably kept so the command built by ensure_running() may pass it;
    # confirm against get_preparation_data()'s keys.
    if preload:
        if '__main__' in preload and main_path is not None:
            # Import the parent's __main__ under the spawn-time guard so
            # "if __name__ == '__main__'" blocks are not re-run here.
            process.current_process()._inheriting = True
            try:
                spawn.import_main_path(main_path)
            finally:
                del process.current_process()._inheriting
        for modname in preload:
            # Best-effort preload: a module that fails to import is skipped.
            try:
                __import__(modname)
            except ImportError:
                pass

    util._close_stdin()

    # Self-pipe used as the SIGCHLD wakeup fd; both ends are non-blocking so
    # neither the signal machinery nor the drain below can stall.
    sig_r, sig_w = os.pipe()
    os.set_blocking(sig_r, False)
    os.set_blocking(sig_w, False)

    def sigchld_handler(*_unused):
        # Dummy signal handler, doesn't do anything
        pass

    handlers = {
        # unblocking SIGCHLD allows the wakeup fd to notify our event loop
        signal.SIGCHLD: sigchld_handler,
        # protect the process from ^C
        signal.SIGINT: signal.SIG_IGN,
    }
    # Remember the previous handlers so each forked child can restore them
    # (they are handed to _serve_one()).
    old_handlers = {sig: signal.signal(sig, val)
                    for (sig, val) in handlers.items()}

    # calling os.write() in the Python signal handler is racy
    signal.set_wakeup_fd(sig_w)

    # map child pids to client fds
    pid_to_fd = {}

    with socket.socket(socket.AF_UNIX, fileno=listener_fd) as listener, \
            selectors.DefaultSelector() as selector:
        _forkserver._forkserver_address = listener.getsockname()

        selector.register(listener, selectors.EVENT_READ)
        selector.register(alive_r, selectors.EVENT_READ)
        selector.register(sig_r, selectors.EVENT_READ)

        while True:
            try:
                # Block until at least one registered fd is ready.
                while True:
                    rfds = [key.fileobj for (key, events) in selector.select()]
                    if rfds:
                        break

                if alive_r in rfds:
                    # EOF because no more client processes left
                    assert os.read(alive_r, 1) == b'', "Not at EOF?"
                    raise SystemExit

                if sig_r in rfds:
                    # Got SIGCHLD
                    os.read(sig_r, 65536)  # exhaust
                    while True:
                        # Scan for child processes
                        try:
                            pid, sts = os.waitpid(-1, os.WNOHANG)
                        except ChildProcessError:
                            break
                        if pid == 0:
                            break
                        child_w = pid_to_fd.pop(pid, None)
                        if child_w is not None:
                            returncode = os.waitstatus_to_exitcode(sts)

                            # Send exit code to client process
                            try:
                                write_signed(child_w, returncode)
                            except BrokenPipeError:
                                # client vanished
                                pass
                            os.close(child_w)
                        else:
                            # This shouldn't happen really
                            warnings.warn('forkserver: waitpid returned '
                                          'unexpected pid %d' % pid)

                if listener in rfds:
                    # Incoming fork request
                    with listener.accept()[0] as s:
                        # Receive fds from client
                        fds = reduction.recvfds(s, MAXFDS_TO_SEND + 1)
                        if len(fds) > MAXFDS_TO_SEND:
                            raise RuntimeError(
                                "Too many ({0:n}) fds to send".format(
                                    len(fds)))
                        # First two fds are the request pipe; the rest are
                        # the fds the client wants the child to inherit.
                        child_r, child_w, *fds = fds
                        s.close()
                        pid = os.fork()
                        if pid == 0:
                            # Child
                            code = 1
                            try:
                                listener.close()
                                selector.close()
                                # fds owned by the server/other children,
                                # not this child.
                                unused_fds = [alive_r, child_w, sig_r, sig_w]
                                unused_fds.extend(pid_to_fd.values())
                                code = _serve_one(child_r, fds,
                                                  unused_fds,
                                                  old_handlers)
                            except Exception:
                                sys.excepthook(*sys.exc_info())
                                sys.stderr.flush()
                            finally:
                                # Never return into the server's event loop.
                                os._exit(code)
                        else:
                            # Send pid to client process
                            try:
                                write_signed(child_w, pid)
                            except BrokenPipeError:
                                # client vanished
                                pass
                            pid_to_fd[pid] = child_w
                            os.close(child_r)
                            for fd in fds:
                                os.close(fd)

            except OSError as e:
                # A client aborting its connection is not fatal to the loop.
                if e.errno != errno.ECONNABORTED:
                    raise
297
+
298
+
299
def _serve_one(child_r, fds, unused_fds, handlers):
    '''Run one forked child: restore signal state, adopt the inherited fds,
    then execute the process object received over ``child_r``.

    Returns the exit code to pass to os._exit() in the caller.
    '''
    # close unnecessary stuff and reset signal handlers
    signal.set_wakeup_fd(-1)
    for sig, val in handlers.items():
        signal.signal(sig, val)
    for fd in unused_fds:
        os.close(fd)

    # Unpack in the layout built by connect_to_new_process(): alive fd,
    # resource-tracker fd, then the caller-supplied fds.
    (_forkserver._forkserver_alive_fd,
     resource_tracker._resource_tracker._fd,
     *_forkserver._inherited_fds) = fds

    # Run process object received over pipe
    parent_sentinel = os.dup(child_r)
    code = spawn._main(child_r, parent_sentinel)

    return code
316
+
317
+
318
+ #
319
+ # Read and write signed numbers
320
+ #
321
+
322
def read_signed(fd):
    '''Read exactly one SIGNED_STRUCT-encoded integer from fd.'''
    buf = bytearray()
    want = SIGNED_STRUCT.size
    while len(buf) < want:
        chunk = os.read(fd, want - len(buf))
        if not chunk:
            raise EOFError('unexpected EOF')
        buf.extend(chunk)
    return SIGNED_STRUCT.unpack(bytes(buf))[0]
331
+
332
def write_signed(fd, n):
    '''Write integer n to fd as one SIGNED_STRUCT value, retrying partial writes.'''
    remaining = SIGNED_STRUCT.pack(n)
    while remaining:
        written = os.write(fd, remaining)
        if written == 0:
            raise RuntimeError('should not get here')
        remaining = remaining[written:]
339
+
340
+ #
341
+ #
342
+ #
343
+
344
# Per-process singleton; the module-level functions below are simply bound
# methods of it and form this module's public API (see __all__).
_forkserver = ForkServer()
ensure_running = _forkserver.ensure_running
get_inherited_fds = _forkserver.get_inherited_fds
connect_to_new_process = _forkserver.connect_to_new_process
set_forkserver_preload = _forkserver.set_forkserver_preload
llava/lib/python3.10/multiprocessing/managers.py ADDED
@@ -0,0 +1,1378 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Module providing manager classes for dealing
3
+ # with shared objects
4
+ #
5
+ # multiprocessing/managers.py
6
+ #
7
+ # Copyright (c) 2006-2008, R Oudkerk
8
+ # Licensed to PSF under a Contributor Agreement.
9
+ #
10
+
11
+ __all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token' ]
12
+
13
+ #
14
+ # Imports
15
+ #
16
+
17
+ import sys
18
+ import threading
19
+ import signal
20
+ import array
21
+ import queue
22
+ import time
23
+ import types
24
+ import os
25
+ from os import getpid
26
+
27
+ from traceback import format_exc
28
+
29
+ from . import connection
30
+ from .context import reduction, get_spawning_popen, ProcessError
31
+ from . import pool
32
+ from . import process
33
+ from . import util
34
+ from . import get_context
35
+ try:
36
+ from . import shared_memory
37
+ except ImportError:
38
+ HAS_SHMEM = False
39
+ else:
40
+ HAS_SHMEM = True
41
+ __all__.append('SharedMemoryManager')
42
+
43
+ #
44
+ # Register some things for pickling
45
+ #
46
+
47
def reduce_array(a):
    '''Pickle helper: rebuild an array.array from its typecode and raw bytes.'''
    typecode, payload = a.typecode, a.tobytes()
    return array.array, (typecode, payload)
49
+ reduction.register(array.array, reduce_array)
50
+
51
# Dict view objects (dict_items / dict_keys / dict_values) are not picklable,
# so register reducers that serialize them as plain lists.
view_types = [type(getattr({}, name)()) for name in ('items','keys','values')]
if view_types[0] is not list: # only needed in Py3.0
    def rebuild_as_list(obj):
        return list, (list(obj),)
    for view_type in view_types:
        reduction.register(view_type, rebuild_as_list)
57
+
58
+ #
59
+ # Type for identifying shared objects
60
+ #
61
+
62
class Token(object):
    '''
    Type to uniquely identify a shared object
    '''
    __slots__ = ('typeid', 'address', 'id')

    def __init__(self, typeid, address, id):
        self.typeid = typeid
        self.address = address
        self.id = id

    def __getstate__(self):
        # Required because __slots__ removes the default __dict__ pickling.
        return (self.typeid, self.address, self.id)

    def __setstate__(self, state):
        self.typeid, self.address, self.id = state

    def __repr__(self):
        fields = (self.__class__.__name__, self.typeid, self.address, self.id)
        return '%s(typeid=%r, address=%r, id=%r)' % fields
80
+
81
+ #
82
+ # Function for communication with a manager's server process
83
+ #
84
+
85
def dispatch(c, id, methodname, args=(), kwds={}):
    '''
    Send a message to manager using connection `c` and return response
    '''
    message = (id, methodname, args, kwds)
    c.send(message)
    kind, result = c.recv()
    if kind != '#RETURN':
        # Any other kind encodes a failure; turn it into an exception.
        raise convert_to_error(kind, result)
    return result
94
+
95
def convert_to_error(kind, result):
    '''Translate a (kind, result) failure reply into an exception object.'''
    if kind == '#ERROR':
        # Server shipped back a real exception instance; use it as-is.
        return result
    if kind in ('#TRACEBACK', '#UNSERIALIZABLE'):
        # Both kinds carry the remote traceback as plain text.
        if not isinstance(result, str):
            raise TypeError(
                "Result {0!r} (kind '{1}') type is {2}, not str".format(
                    result, kind, type(result)))
        if kind == '#UNSERIALIZABLE':
            return RemoteError('Unserializable message: %s\n' % result)
        return RemoteError(result)
    return ValueError('Unrecognized message type {!r}'.format(kind))
109
+
110
class RemoteError(Exception):
    '''Exception whose message is a remote traceback rendered between rules.'''
    def __str__(self):
        divider = '-' * 75
        return '\n' + divider + '\n' + str(self.args[0]) + divider
113
+
114
+ #
115
+ # Functions for finding the method names of an object
116
+ #
117
+
118
def all_methods(obj):
    '''
    Return a list of names of methods of `obj`
    '''
    return [name for name in dir(obj) if callable(getattr(obj, name))]
128
+
129
def public_methods(obj):
    '''
    Return a list of names of methods of `obj` which do not start with '_'
    '''
    # Single pass over dir(obj): keep callables whose name is public.
    return [name for name in dir(obj)
            if callable(getattr(obj, name)) and name[0] != '_']
134
+
135
+ #
136
+ # Server which is run in a process controlled by a manager
137
+ #
138
+
139
class Server(object):
    '''
    Server class which runs in a process controlled by a manager object
    '''
    # Method names a newly connected client may invoke before (or instead of)
    # becoming a proxy-serving connection; checked in _handle_request().
    public = ['shutdown', 'create', 'accept_connection', 'get_methods',
              'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref']

    def __init__(self, registry, address, authkey, serializer):
        # registry maps typeid -> (callable, exposed, method_to_typeid,
        # proxytype); see create() for how each slot is used.
        if not isinstance(authkey, bytes):
            raise TypeError(
                "Authkey {0!r} is type {1!s}, not bytes".format(
                    authkey, type(authkey)))
        self.registry = registry
        self.authkey = process.AuthenticationString(authkey)
        Listener, Client = listener_client[serializer]

        # do authentication later
        self.listener = Listener(address=address, backlog=16)
        self.address = self.listener.address

        # ident (hex string of id(obj)) -> (obj, exposed-set, method_to_typeid);
        # '0' is a reserved dummy entry with no object.
        self.id_to_obj = {'0': (None, ())}
        self.id_to_refcount = {}
        self.id_to_local_proxy_obj = {}
        # Protects the three id_to_* maps across serving threads.
        self.mutex = threading.Lock()

    def serve_forever(self):
        '''
        Run the server forever
        '''
        self.stop_event = threading.Event()
        process.current_process()._manager_server = self
        try:
            # Accepting happens on a daemon thread; this thread just waits
            # for shutdown() to set the stop event.
            accepter = threading.Thread(target=self.accepter)
            accepter.daemon = True
            accepter.start()
            try:
                while not self.stop_event.is_set():
                    self.stop_event.wait(1)
            except (KeyboardInterrupt, SystemExit):
                pass
        finally:
            if sys.stdout != sys.__stdout__: # what about stderr?
                util.debug('resetting stdout, stderr')
                sys.stdout = sys.__stdout__
                sys.stderr = sys.__stderr__
            sys.exit(0)

    def accepter(self):
        # Accept loop: each connection gets its own daemon thread.
        while True:
            try:
                c = self.listener.accept()
            except OSError:
                continue
            t = threading.Thread(target=self.handle_request, args=(c,))
            t.daemon = True
            t.start()

    def _handle_request(self, c):
        '''Authenticate connection `c`, run one `public` method, reply.'''
        request = None
        try:
            # Mutual challenge/response authentication using self.authkey.
            connection.deliver_challenge(c, self.authkey)
            connection.answer_challenge(c, self.authkey)
            request = c.recv()
            ignore, funcname, args, kwds = request
            assert funcname in self.public, '%r unrecognized' % funcname
            func = getattr(self, funcname)
        except Exception:
            msg = ('#TRACEBACK', format_exc())
        else:
            try:
                result = func(c, *args, **kwds)
            except Exception:
                msg = ('#TRACEBACK', format_exc())
            else:
                msg = ('#RETURN', result)

        try:
            c.send(msg)
        except Exception as e:
            # Best effort: report the send failure back, then just log.
            try:
                c.send(('#TRACEBACK', format_exc()))
            except Exception:
                pass
            util.info('Failure to send message: %r', msg)
            util.info(' ... request was %r', request)
            util.info(' ... exception was %r', e)

    def handle_request(self, conn):
        '''
        Handle a new connection
        '''
        try:
            self._handle_request(conn)
        except SystemExit:
            # Server.serve_client() calls sys.exit(0) on EOF
            pass
        finally:
            conn.close()

    def serve_client(self, conn):
        '''
        Handle requests from the proxies in a particular process/thread
        '''
        util.debug('starting server thread to service %r',
                   threading.current_thread().name)

        recv = conn.recv
        send = conn.send
        id_to_obj = self.id_to_obj

        while not self.stop_event.is_set():

            try:
                methodname = obj = None
                request = recv()
                ident, methodname, args, kwds = request
                try:
                    obj, exposed, gettypeid = id_to_obj[ident]
                except KeyError as ke:
                    # Fall back to objects only referenced server-side.
                    try:
                        obj, exposed, gettypeid = \
                            self.id_to_local_proxy_obj[ident]
                    except KeyError:
                        raise ke

                if methodname not in exposed:
                    raise AttributeError(
                        'method %r of %r object is not in exposed=%r' %
                        (methodname, type(obj), exposed)
                        )

                function = getattr(obj, methodname)

                try:
                    res = function(*args, **kwds)
                except Exception as e:
                    msg = ('#ERROR', e)
                else:
                    # If the method is registered to return a proxied type,
                    # create a new shared object and reply with its token.
                    typeid = gettypeid and gettypeid.get(methodname, None)
                    if typeid:
                        rident, rexposed = self.create(conn, typeid, res)
                        token = Token(typeid, self.address, rident)
                        msg = ('#PROXY', (rexposed, token))
                    else:
                        msg = ('#RETURN', res)

            except AttributeError:
                if methodname is None:
                    msg = ('#TRACEBACK', format_exc())
                else:
                    # Unexposed/missing method: try __str__/__repr__/#GETVALUE.
                    try:
                        fallback_func = self.fallback_mapping[methodname]
                        result = fallback_func(
                            self, conn, ident, obj, *args, **kwds
                            )
                        msg = ('#RETURN', result)
                    except Exception:
                        msg = ('#TRACEBACK', format_exc())

            except EOFError:
                util.debug('got EOF -- exiting thread serving %r',
                           threading.current_thread().name)
                sys.exit(0)

            except Exception:
                msg = ('#TRACEBACK', format_exc())

            try:
                try:
                    send(msg)
                except Exception:
                    # Reply itself failed to pickle; report that instead.
                    send(('#UNSERIALIZABLE', format_exc()))
            except Exception as e:
                util.info('exception in thread serving %r',
                        threading.current_thread().name)
                util.info(' ... message was %r', msg)
                util.info(' ... exception was %r', e)
                conn.close()
                sys.exit(1)

    def fallback_getvalue(self, conn, ident, obj):
        # Fallback for '#GETVALUE': return the shared object itself.
        return obj

    def fallback_str(self, conn, ident, obj):
        return str(obj)

    def fallback_repr(self, conn, ident, obj):
        return repr(obj)

    # Methods served even when not listed in an object's exposed set.
    fallback_mapping = {
        '__str__':fallback_str,
        '__repr__':fallback_repr,
        '#GETVALUE':fallback_getvalue
        }

    def dummy(self, c):
        # No-op used by BaseManager.connect() to probe the server.
        pass

    def debug_info(self, c):
        '''
        Return some info --- useful to spot problems with refcounting
        '''
        # Perhaps include debug info about 'c'?
        with self.mutex:
            result = []
            keys = list(self.id_to_refcount.keys())
            keys.sort()
            for ident in keys:
                if ident != '0':
                    result.append(' %s: refcount=%s\n %s' %
                                  (ident, self.id_to_refcount[ident],
                                   str(self.id_to_obj[ident][0])[:75]))
            return '\n'.join(result)

    def number_of_objects(self, c):
        '''
        Number of shared objects
        '''
        # Doesn't use (len(self.id_to_obj) - 1) as we shouldn't count ident='0'
        return len(self.id_to_refcount)

    def shutdown(self, c):
        '''
        Shutdown this process
        '''
        try:
            util.debug('manager received shutdown message')
            # Acknowledge before stopping so the client's dispatch() returns.
            c.send(('#RETURN', None))
        except:
            import traceback
            traceback.print_exc()
        finally:
            self.stop_event.set()

    def create(self, c, typeid, /, *args, **kwds):
        '''
        Create a new shared object and return its id
        '''
        with self.mutex:
            callable, exposed, method_to_typeid, proxytype = \
                self.registry[typeid]

            if callable is None:
                # Registered without a factory: the object itself is passed.
                if kwds or (len(args) != 1):
                    raise ValueError(
                        "Without callable, must have one non-keyword argument")
                obj = args[0]
            else:
                obj = callable(*args, **kwds)

            if exposed is None:
                exposed = public_methods(obj)
            if method_to_typeid is not None:
                if not isinstance(method_to_typeid, dict):
                    raise TypeError(
                        "Method_to_typeid {0!r}: type {1!s}, not dict".format(
                            method_to_typeid, type(method_to_typeid)))
                exposed = list(exposed) + list(method_to_typeid)

            ident = '%x' % id(obj) # convert to string because xmlrpclib
                                   # only has 32 bit signed integers
            util.debug('%r callable returned object with id %r', typeid, ident)

            self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid)
            if ident not in self.id_to_refcount:
                self.id_to_refcount[ident] = 0

        self.incref(c, ident)
        return ident, tuple(exposed)

    def get_methods(self, c, token):
        '''
        Return the methods of the shared object indicated by token
        '''
        return tuple(self.id_to_obj[token.id][1])

    def accept_connection(self, c, name):
        '''
        Spawn a new thread to serve this connection
        '''
        threading.current_thread().name = name
        c.send(('#RETURN', None))
        # Serve proxy requests on this thread until EOF/shutdown.
        self.serve_client(c)

    def incref(self, c, ident):
        with self.mutex:
            try:
                self.id_to_refcount[ident] += 1
            except KeyError as ke:
                # If no external references exist but an internal (to the
                # manager) still does and a new external reference is created
                # from it, restore the manager's tracking of it from the
                # previously stashed internal ref.
                if ident in self.id_to_local_proxy_obj:
                    self.id_to_refcount[ident] = 1
                    self.id_to_obj[ident] = \
                        self.id_to_local_proxy_obj[ident]
                    obj, exposed, gettypeid = self.id_to_obj[ident]
                    util.debug('Server re-enabled tracking & INCREF %r', ident)
                else:
                    raise ke

    def decref(self, c, ident):
        # Objects only referenced internally are never externally decref'd.
        if ident not in self.id_to_refcount and \
           ident in self.id_to_local_proxy_obj:
            util.debug('Server DECREF skipping %r', ident)
            return

        with self.mutex:
            if self.id_to_refcount[ident] <= 0:
                raise AssertionError(
                    "Id {0!s} ({1!r}) has refcount {2:n}, not 1+".format(
                        ident, self.id_to_obj[ident],
                        self.id_to_refcount[ident]))
            self.id_to_refcount[ident] -= 1
            if self.id_to_refcount[ident] == 0:
                del self.id_to_refcount[ident]

        if ident not in self.id_to_refcount:
            # Two-step process in case the object turns out to contain other
            # proxy objects (e.g. a managed list of managed lists).
            # Otherwise, deleting self.id_to_obj[ident] would trigger the
            # deleting of the stored value (another managed object) which would
            # in turn attempt to acquire the mutex that is already held here.
            self.id_to_obj[ident] = (None, (), None) # thread-safe
            util.debug('disposing of obj with id %r', ident)
            with self.mutex:
                del self.id_to_obj[ident]
467
+
468
+
469
+ #
470
+ # Class to represent state of a manager
471
+ #
472
+
473
class State(object):
    '''Mutable holder for a manager's lifecycle state.

    ``value`` is one of the three class constants below.
    '''
    __slots__ = ['value']
    INITIAL, STARTED, SHUTDOWN = 0, 1, 2
478
+
479
+ #
480
+ # Mapping from serializer name to Listener and Client types
481
+ #
482
+
483
+ listener_client = {
484
+ 'pickle' : (connection.Listener, connection.Client),
485
+ 'xmlrpclib' : (connection.XmlListener, connection.XmlClient)
486
+ }
487
+
488
+ #
489
+ # Definition of BaseManager
490
+ #
491
+
492
+ class BaseManager(object):
493
+ '''
494
+ Base class for managers
495
+ '''
496
+ _registry = {}
497
+ _Server = Server
498
+
499
+ def __init__(self, address=None, authkey=None, serializer='pickle',
500
+ ctx=None):
501
+ if authkey is None:
502
+ authkey = process.current_process().authkey
503
+ self._address = address # XXX not final address if eg ('', 0)
504
+ self._authkey = process.AuthenticationString(authkey)
505
+ self._state = State()
506
+ self._state.value = State.INITIAL
507
+ self._serializer = serializer
508
+ self._Listener, self._Client = listener_client[serializer]
509
+ self._ctx = ctx or get_context()
510
+
511
+ def get_server(self):
512
+ '''
513
+ Return server object with serve_forever() method and address attribute
514
+ '''
515
+ if self._state.value != State.INITIAL:
516
+ if self._state.value == State.STARTED:
517
+ raise ProcessError("Already started server")
518
+ elif self._state.value == State.SHUTDOWN:
519
+ raise ProcessError("Manager has shut down")
520
+ else:
521
+ raise ProcessError(
522
+ "Unknown state {!r}".format(self._state.value))
523
+ return Server(self._registry, self._address,
524
+ self._authkey, self._serializer)
525
+
526
+ def connect(self):
527
+ '''
528
+ Connect manager object to the server process
529
+ '''
530
+ Listener, Client = listener_client[self._serializer]
531
+ conn = Client(self._address, authkey=self._authkey)
532
+ dispatch(conn, None, 'dummy')
533
+ self._state.value = State.STARTED
534
+
535
+ def start(self, initializer=None, initargs=()):
536
+ '''
537
+ Spawn a server process for this manager object
538
+ '''
539
+ if self._state.value != State.INITIAL:
540
+ if self._state.value == State.STARTED:
541
+ raise ProcessError("Already started server")
542
+ elif self._state.value == State.SHUTDOWN:
543
+ raise ProcessError("Manager has shut down")
544
+ else:
545
+ raise ProcessError(
546
+ "Unknown state {!r}".format(self._state.value))
547
+
548
+ if initializer is not None and not callable(initializer):
549
+ raise TypeError('initializer must be a callable')
550
+
551
+ # pipe over which we will retrieve address of server
552
+ reader, writer = connection.Pipe(duplex=False)
553
+
554
+ # spawn process which runs a server
555
+ self._process = self._ctx.Process(
556
+ target=type(self)._run_server,
557
+ args=(self._registry, self._address, self._authkey,
558
+ self._serializer, writer, initializer, initargs),
559
+ )
560
+ ident = ':'.join(str(i) for i in self._process._identity)
561
+ self._process.name = type(self).__name__ + '-' + ident
562
+ self._process.start()
563
+
564
+ # get address of server
565
+ writer.close()
566
+ self._address = reader.recv()
567
+ reader.close()
568
+
569
+ # register a finalizer
570
+ self._state.value = State.STARTED
571
+ self.shutdown = util.Finalize(
572
+ self, type(self)._finalize_manager,
573
+ args=(self._process, self._address, self._authkey,
574
+ self._state, self._Client),
575
+ exitpriority=0
576
+ )
577
+
578
+ @classmethod
579
+ def _run_server(cls, registry, address, authkey, serializer, writer,
580
+ initializer=None, initargs=()):
581
+ '''
582
+ Create a server, report its address and run it
583
+ '''
584
+ # bpo-36368: protect server process from KeyboardInterrupt signals
585
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
586
+
587
+ if initializer is not None:
588
+ initializer(*initargs)
589
+
590
+ # create server
591
+ server = cls._Server(registry, address, authkey, serializer)
592
+
593
+ # inform parent process of the server's address
594
+ writer.send(server.address)
595
+ writer.close()
596
+
597
+ # run the manager
598
+ util.info('manager serving at %r', server.address)
599
+ server.serve_forever()
600
+
601
+ def _create(self, typeid, /, *args, **kwds):
602
+ '''
603
+ Create a new shared object; return the token and exposed tuple
604
+ '''
605
+ assert self._state.value == State.STARTED, 'server not yet started'
606
+ conn = self._Client(self._address, authkey=self._authkey)
607
+ try:
608
+ id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds)
609
+ finally:
610
+ conn.close()
611
+ return Token(typeid, self._address, id), exposed
612
+
613
+ def join(self, timeout=None):
614
+ '''
615
+ Join the manager process (if it has been spawned)
616
+ '''
617
+ if self._process is not None:
618
+ self._process.join(timeout)
619
+ if not self._process.is_alive():
620
+ self._process = None
621
+
622
+ def _debug_info(self):
623
+ '''
624
+ Return some info about the servers shared objects and connections
625
+ '''
626
+ conn = self._Client(self._address, authkey=self._authkey)
627
+ try:
628
+ return dispatch(conn, None, 'debug_info')
629
+ finally:
630
+ conn.close()
631
+
632
+ def _number_of_objects(self):
633
+ '''
634
+ Return the number of shared objects
635
+ '''
636
+ conn = self._Client(self._address, authkey=self._authkey)
637
+ try:
638
+ return dispatch(conn, None, 'number_of_objects')
639
+ finally:
640
+ conn.close()
641
+
642
+ def __enter__(self):
643
+ if self._state.value == State.INITIAL:
644
+ self.start()
645
+ if self._state.value != State.STARTED:
646
+ if self._state.value == State.INITIAL:
647
+ raise ProcessError("Unable to start server")
648
+ elif self._state.value == State.SHUTDOWN:
649
+ raise ProcessError("Manager has shut down")
650
+ else:
651
+ raise ProcessError(
652
+ "Unknown state {!r}".format(self._state.value))
653
+ return self
654
+
655
+ def __exit__(self, exc_type, exc_val, exc_tb):
656
+ self.shutdown()
657
+
658
+ @staticmethod
659
+ def _finalize_manager(process, address, authkey, state, _Client):
660
+ '''
661
+ Shutdown the manager process; will be registered as a finalizer
662
+ '''
663
+ if process.is_alive():
664
+ util.info('sending shutdown message to manager')
665
+ try:
666
+ conn = _Client(address, authkey=authkey)
667
+ try:
668
+ dispatch(conn, None, 'shutdown')
669
+ finally:
670
+ conn.close()
671
+ except Exception:
672
+ pass
673
+
674
+ process.join(timeout=1.0)
675
+ if process.is_alive():
676
+ util.info('manager still alive')
677
+ if hasattr(process, 'terminate'):
678
+ util.info('trying to `terminate()` manager process')
679
+ process.terminate()
680
+ process.join(timeout=1.0)
681
+ if process.is_alive():
682
+ util.info('manager still alive after terminate')
683
+
684
+ state.value = State.SHUTDOWN
685
+ try:
686
+ del BaseProxy._address_to_local[address]
687
+ except KeyError:
688
+ pass
689
+
690
+ @property
691
+ def address(self):
692
+ return self._address
693
+
694
+ @classmethod
695
+ def register(cls, typeid, callable=None, proxytype=None, exposed=None,
696
+ method_to_typeid=None, create_method=True):
697
+ '''
698
+ Register a typeid with the manager type
699
+ '''
700
+ if '_registry' not in cls.__dict__:
701
+ cls._registry = cls._registry.copy()
702
+
703
+ if proxytype is None:
704
+ proxytype = AutoProxy
705
+
706
+ exposed = exposed or getattr(proxytype, '_exposed_', None)
707
+
708
+ method_to_typeid = method_to_typeid or \
709
+ getattr(proxytype, '_method_to_typeid_', None)
710
+
711
+ if method_to_typeid:
712
+ for key, value in list(method_to_typeid.items()): # isinstance?
713
+ assert type(key) is str, '%r is not a string' % key
714
+ assert type(value) is str, '%r is not a string' % value
715
+
716
+ cls._registry[typeid] = (
717
+ callable, exposed, method_to_typeid, proxytype
718
+ )
719
+
720
+ if create_method:
721
+ def temp(self, /, *args, **kwds):
722
+ util.debug('requesting creation of a shared %r object', typeid)
723
+ token, exp = self._create(typeid, *args, **kwds)
724
+ proxy = proxytype(
725
+ token, self._serializer, manager=self,
726
+ authkey=self._authkey, exposed=exp
727
+ )
728
+ conn = self._Client(token.address, authkey=self._authkey)
729
+ dispatch(conn, None, 'decref', (token.id,))
730
+ return proxy
731
+ temp.__name__ = typeid
732
+ setattr(cls, typeid, temp)
733
+
734
+ #
735
+ # Subclass of set which get cleared after a fork
736
+ #
737
+
738
+ class ProcessLocalSet(set):
739
+ def __init__(self):
740
+ util.register_after_fork(self, lambda obj: obj.clear())
741
+ def __reduce__(self):
742
+ return type(self), ()
743
+
744
+ #
745
+ # Definition of BaseProxy
746
+ #
747
+
748
+ class BaseProxy(object):
749
+ '''
750
+ A base for proxies of shared objects
751
+ '''
752
+ _address_to_local = {}
753
+ _mutex = util.ForkAwareThreadLock()
754
+
755
+ def __init__(self, token, serializer, manager=None,
756
+ authkey=None, exposed=None, incref=True, manager_owned=False):
757
+ with BaseProxy._mutex:
758
+ tls_idset = BaseProxy._address_to_local.get(token.address, None)
759
+ if tls_idset is None:
760
+ tls_idset = util.ForkAwareLocal(), ProcessLocalSet()
761
+ BaseProxy._address_to_local[token.address] = tls_idset
762
+
763
+ # self._tls is used to record the connection used by this
764
+ # thread to communicate with the manager at token.address
765
+ self._tls = tls_idset[0]
766
+
767
+ # self._idset is used to record the identities of all shared
768
+ # objects for which the current process owns references and
769
+ # which are in the manager at token.address
770
+ self._idset = tls_idset[1]
771
+
772
+ self._token = token
773
+ self._id = self._token.id
774
+ self._manager = manager
775
+ self._serializer = serializer
776
+ self._Client = listener_client[serializer][1]
777
+
778
+ # Should be set to True only when a proxy object is being created
779
+ # on the manager server; primary use case: nested proxy objects.
780
+ # RebuildProxy detects when a proxy is being created on the manager
781
+ # and sets this value appropriately.
782
+ self._owned_by_manager = manager_owned
783
+
784
+ if authkey is not None:
785
+ self._authkey = process.AuthenticationString(authkey)
786
+ elif self._manager is not None:
787
+ self._authkey = self._manager._authkey
788
+ else:
789
+ self._authkey = process.current_process().authkey
790
+
791
+ if incref:
792
+ self._incref()
793
+
794
+ util.register_after_fork(self, BaseProxy._after_fork)
795
+
796
+ def _connect(self):
797
+ util.debug('making connection to manager')
798
+ name = process.current_process().name
799
+ if threading.current_thread().name != 'MainThread':
800
+ name += '|' + threading.current_thread().name
801
+ conn = self._Client(self._token.address, authkey=self._authkey)
802
+ dispatch(conn, None, 'accept_connection', (name,))
803
+ self._tls.connection = conn
804
+
805
+ def _callmethod(self, methodname, args=(), kwds={}):
806
+ '''
807
+ Try to call a method of the referent and return a copy of the result
808
+ '''
809
+ try:
810
+ conn = self._tls.connection
811
+ except AttributeError:
812
+ util.debug('thread %r does not own a connection',
813
+ threading.current_thread().name)
814
+ self._connect()
815
+ conn = self._tls.connection
816
+
817
+ conn.send((self._id, methodname, args, kwds))
818
+ kind, result = conn.recv()
819
+
820
+ if kind == '#RETURN':
821
+ return result
822
+ elif kind == '#PROXY':
823
+ exposed, token = result
824
+ proxytype = self._manager._registry[token.typeid][-1]
825
+ token.address = self._token.address
826
+ proxy = proxytype(
827
+ token, self._serializer, manager=self._manager,
828
+ authkey=self._authkey, exposed=exposed
829
+ )
830
+ conn = self._Client(token.address, authkey=self._authkey)
831
+ dispatch(conn, None, 'decref', (token.id,))
832
+ return proxy
833
+ raise convert_to_error(kind, result)
834
+
835
+ def _getvalue(self):
836
+ '''
837
+ Get a copy of the value of the referent
838
+ '''
839
+ return self._callmethod('#GETVALUE')
840
+
841
+ def _incref(self):
842
+ if self._owned_by_manager:
843
+ util.debug('owned_by_manager skipped INCREF of %r', self._token.id)
844
+ return
845
+
846
+ conn = self._Client(self._token.address, authkey=self._authkey)
847
+ dispatch(conn, None, 'incref', (self._id,))
848
+ util.debug('INCREF %r', self._token.id)
849
+
850
+ self._idset.add(self._id)
851
+
852
+ state = self._manager and self._manager._state
853
+
854
+ self._close = util.Finalize(
855
+ self, BaseProxy._decref,
856
+ args=(self._token, self._authkey, state,
857
+ self._tls, self._idset, self._Client),
858
+ exitpriority=10
859
+ )
860
+
861
+ @staticmethod
862
+ def _decref(token, authkey, state, tls, idset, _Client):
863
+ idset.discard(token.id)
864
+
865
+ # check whether manager is still alive
866
+ if state is None or state.value == State.STARTED:
867
+ # tell manager this process no longer cares about referent
868
+ try:
869
+ util.debug('DECREF %r', token.id)
870
+ conn = _Client(token.address, authkey=authkey)
871
+ dispatch(conn, None, 'decref', (token.id,))
872
+ except Exception as e:
873
+ util.debug('... decref failed %s', e)
874
+
875
+ else:
876
+ util.debug('DECREF %r -- manager already shutdown', token.id)
877
+
878
+ # check whether we can close this thread's connection because
879
+ # the process owns no more references to objects for this manager
880
+ if not idset and hasattr(tls, 'connection'):
881
+ util.debug('thread %r has no more proxies so closing conn',
882
+ threading.current_thread().name)
883
+ tls.connection.close()
884
+ del tls.connection
885
+
886
+ def _after_fork(self):
887
+ self._manager = None
888
+ try:
889
+ self._incref()
890
+ except Exception as e:
891
+ # the proxy may just be for a manager which has shutdown
892
+ util.info('incref failed: %s' % e)
893
+
894
+ def __reduce__(self):
895
+ kwds = {}
896
+ if get_spawning_popen() is not None:
897
+ kwds['authkey'] = self._authkey
898
+
899
+ if getattr(self, '_isauto', False):
900
+ kwds['exposed'] = self._exposed_
901
+ return (RebuildProxy,
902
+ (AutoProxy, self._token, self._serializer, kwds))
903
+ else:
904
+ return (RebuildProxy,
905
+ (type(self), self._token, self._serializer, kwds))
906
+
907
+ def __deepcopy__(self, memo):
908
+ return self._getvalue()
909
+
910
+ def __repr__(self):
911
+ return '<%s object, typeid %r at %#x>' % \
912
+ (type(self).__name__, self._token.typeid, id(self))
913
+
914
+ def __str__(self):
915
+ '''
916
+ Return representation of the referent (or a fall-back if that fails)
917
+ '''
918
+ try:
919
+ return self._callmethod('__repr__')
920
+ except Exception:
921
+ return repr(self)[:-1] + "; '__str__()' failed>"
922
+
923
+ #
924
+ # Function used for unpickling
925
+ #
926
+
927
+ def RebuildProxy(func, token, serializer, kwds):
928
+ '''
929
+ Function used for unpickling proxy objects.
930
+ '''
931
+ server = getattr(process.current_process(), '_manager_server', None)
932
+ if server and server.address == token.address:
933
+ util.debug('Rebuild a proxy owned by manager, token=%r', token)
934
+ kwds['manager_owned'] = True
935
+ if token.id not in server.id_to_local_proxy_obj:
936
+ server.id_to_local_proxy_obj[token.id] = \
937
+ server.id_to_obj[token.id]
938
+ incref = (
939
+ kwds.pop('incref', True) and
940
+ not getattr(process.current_process(), '_inheriting', False)
941
+ )
942
+ return func(token, serializer, incref=incref, **kwds)
943
+
944
+ #
945
+ # Functions to create proxies and proxy types
946
+ #
947
+
948
+ def MakeProxyType(name, exposed, _cache={}):
949
+ '''
950
+ Return a proxy type whose methods are given by `exposed`
951
+ '''
952
+ exposed = tuple(exposed)
953
+ try:
954
+ return _cache[(name, exposed)]
955
+ except KeyError:
956
+ pass
957
+
958
+ dic = {}
959
+
960
+ for meth in exposed:
961
+ exec('''def %s(self, /, *args, **kwds):
962
+ return self._callmethod(%r, args, kwds)''' % (meth, meth), dic)
963
+
964
+ ProxyType = type(name, (BaseProxy,), dic)
965
+ ProxyType._exposed_ = exposed
966
+ _cache[(name, exposed)] = ProxyType
967
+ return ProxyType
968
+
969
+
970
+ def AutoProxy(token, serializer, manager=None, authkey=None,
971
+ exposed=None, incref=True, manager_owned=False):
972
+ '''
973
+ Return an auto-proxy for `token`
974
+ '''
975
+ _Client = listener_client[serializer][1]
976
+
977
+ if exposed is None:
978
+ conn = _Client(token.address, authkey=authkey)
979
+ try:
980
+ exposed = dispatch(conn, None, 'get_methods', (token,))
981
+ finally:
982
+ conn.close()
983
+
984
+ if authkey is None and manager is not None:
985
+ authkey = manager._authkey
986
+ if authkey is None:
987
+ authkey = process.current_process().authkey
988
+
989
+ ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed)
990
+ proxy = ProxyType(token, serializer, manager=manager, authkey=authkey,
991
+ incref=incref, manager_owned=manager_owned)
992
+ proxy._isauto = True
993
+ return proxy
994
+
995
+ #
996
+ # Types/callables which we will register with SyncManager
997
+ #
998
+
999
+ class Namespace(object):
1000
+ def __init__(self, /, **kwds):
1001
+ self.__dict__.update(kwds)
1002
+ def __repr__(self):
1003
+ items = list(self.__dict__.items())
1004
+ temp = []
1005
+ for name, value in items:
1006
+ if not name.startswith('_'):
1007
+ temp.append('%s=%r' % (name, value))
1008
+ temp.sort()
1009
+ return '%s(%s)' % (self.__class__.__name__, ', '.join(temp))
1010
+
1011
+ class Value(object):
1012
+ def __init__(self, typecode, value, lock=True):
1013
+ self._typecode = typecode
1014
+ self._value = value
1015
+ def get(self):
1016
+ return self._value
1017
+ def set(self, value):
1018
+ self._value = value
1019
+ def __repr__(self):
1020
+ return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value)
1021
+ value = property(get, set)
1022
+
1023
+ def Array(typecode, sequence, lock=True):
1024
+ return array.array(typecode, sequence)
1025
+
1026
+ #
1027
+ # Proxy types used by SyncManager
1028
+ #
1029
+
1030
+ class IteratorProxy(BaseProxy):
1031
+ _exposed_ = ('__next__', 'send', 'throw', 'close')
1032
+ def __iter__(self):
1033
+ return self
1034
+ def __next__(self, *args):
1035
+ return self._callmethod('__next__', args)
1036
+ def send(self, *args):
1037
+ return self._callmethod('send', args)
1038
+ def throw(self, *args):
1039
+ return self._callmethod('throw', args)
1040
+ def close(self, *args):
1041
+ return self._callmethod('close', args)
1042
+
1043
+
1044
+ class AcquirerProxy(BaseProxy):
1045
+ _exposed_ = ('acquire', 'release')
1046
+ def acquire(self, blocking=True, timeout=None):
1047
+ args = (blocking,) if timeout is None else (blocking, timeout)
1048
+ return self._callmethod('acquire', args)
1049
+ def release(self):
1050
+ return self._callmethod('release')
1051
+ def __enter__(self):
1052
+ return self._callmethod('acquire')
1053
+ def __exit__(self, exc_type, exc_val, exc_tb):
1054
+ return self._callmethod('release')
1055
+
1056
+
1057
+ class ConditionProxy(AcquirerProxy):
1058
+ _exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all')
1059
+ def wait(self, timeout=None):
1060
+ return self._callmethod('wait', (timeout,))
1061
+ def notify(self, n=1):
1062
+ return self._callmethod('notify', (n,))
1063
+ def notify_all(self):
1064
+ return self._callmethod('notify_all')
1065
+ def wait_for(self, predicate, timeout=None):
1066
+ result = predicate()
1067
+ if result:
1068
+ return result
1069
+ if timeout is not None:
1070
+ endtime = time.monotonic() + timeout
1071
+ else:
1072
+ endtime = None
1073
+ waittime = None
1074
+ while not result:
1075
+ if endtime is not None:
1076
+ waittime = endtime - time.monotonic()
1077
+ if waittime <= 0:
1078
+ break
1079
+ self.wait(waittime)
1080
+ result = predicate()
1081
+ return result
1082
+
1083
+
1084
+ class EventProxy(BaseProxy):
1085
+ _exposed_ = ('is_set', 'set', 'clear', 'wait')
1086
+ def is_set(self):
1087
+ return self._callmethod('is_set')
1088
+ def set(self):
1089
+ return self._callmethod('set')
1090
+ def clear(self):
1091
+ return self._callmethod('clear')
1092
+ def wait(self, timeout=None):
1093
+ return self._callmethod('wait', (timeout,))
1094
+
1095
+
1096
+ class BarrierProxy(BaseProxy):
1097
+ _exposed_ = ('__getattribute__', 'wait', 'abort', 'reset')
1098
+ def wait(self, timeout=None):
1099
+ return self._callmethod('wait', (timeout,))
1100
+ def abort(self):
1101
+ return self._callmethod('abort')
1102
+ def reset(self):
1103
+ return self._callmethod('reset')
1104
+ @property
1105
+ def parties(self):
1106
+ return self._callmethod('__getattribute__', ('parties',))
1107
+ @property
1108
+ def n_waiting(self):
1109
+ return self._callmethod('__getattribute__', ('n_waiting',))
1110
+ @property
1111
+ def broken(self):
1112
+ return self._callmethod('__getattribute__', ('broken',))
1113
+
1114
+
1115
+ class NamespaceProxy(BaseProxy):
1116
+ _exposed_ = ('__getattribute__', '__setattr__', '__delattr__')
1117
+ def __getattr__(self, key):
1118
+ if key[0] == '_':
1119
+ return object.__getattribute__(self, key)
1120
+ callmethod = object.__getattribute__(self, '_callmethod')
1121
+ return callmethod('__getattribute__', (key,))
1122
+ def __setattr__(self, key, value):
1123
+ if key[0] == '_':
1124
+ return object.__setattr__(self, key, value)
1125
+ callmethod = object.__getattribute__(self, '_callmethod')
1126
+ return callmethod('__setattr__', (key, value))
1127
+ def __delattr__(self, key):
1128
+ if key[0] == '_':
1129
+ return object.__delattr__(self, key)
1130
+ callmethod = object.__getattribute__(self, '_callmethod')
1131
+ return callmethod('__delattr__', (key,))
1132
+
1133
+
1134
+ class ValueProxy(BaseProxy):
1135
+ _exposed_ = ('get', 'set')
1136
+ def get(self):
1137
+ return self._callmethod('get')
1138
+ def set(self, value):
1139
+ return self._callmethod('set', (value,))
1140
+ value = property(get, set)
1141
+
1142
+ __class_getitem__ = classmethod(types.GenericAlias)
1143
+
1144
+
1145
+ BaseListProxy = MakeProxyType('BaseListProxy', (
1146
+ '__add__', '__contains__', '__delitem__', '__getitem__', '__len__',
1147
+ '__mul__', '__reversed__', '__rmul__', '__setitem__',
1148
+ 'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove',
1149
+ 'reverse', 'sort', '__imul__'
1150
+ ))
1151
+ class ListProxy(BaseListProxy):
1152
+ def __iadd__(self, value):
1153
+ self._callmethod('extend', (value,))
1154
+ return self
1155
+ def __imul__(self, value):
1156
+ self._callmethod('__imul__', (value,))
1157
+ return self
1158
+
1159
+
1160
+ DictProxy = MakeProxyType('DictProxy', (
1161
+ '__contains__', '__delitem__', '__getitem__', '__iter__', '__len__',
1162
+ '__setitem__', 'clear', 'copy', 'get', 'items',
1163
+ 'keys', 'pop', 'popitem', 'setdefault', 'update', 'values'
1164
+ ))
1165
+ DictProxy._method_to_typeid_ = {
1166
+ '__iter__': 'Iterator',
1167
+ }
1168
+
1169
+
1170
+ ArrayProxy = MakeProxyType('ArrayProxy', (
1171
+ '__len__', '__getitem__', '__setitem__'
1172
+ ))
1173
+
1174
+
1175
+ BasePoolProxy = MakeProxyType('PoolProxy', (
1176
+ 'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join',
1177
+ 'map', 'map_async', 'starmap', 'starmap_async', 'terminate',
1178
+ ))
1179
+ BasePoolProxy._method_to_typeid_ = {
1180
+ 'apply_async': 'AsyncResult',
1181
+ 'map_async': 'AsyncResult',
1182
+ 'starmap_async': 'AsyncResult',
1183
+ 'imap': 'Iterator',
1184
+ 'imap_unordered': 'Iterator'
1185
+ }
1186
+ class PoolProxy(BasePoolProxy):
1187
+ def __enter__(self):
1188
+ return self
1189
+ def __exit__(self, exc_type, exc_val, exc_tb):
1190
+ self.terminate()
1191
+
1192
+ #
1193
+ # Definition of SyncManager
1194
+ #
1195
+
1196
+ class SyncManager(BaseManager):
1197
+ '''
1198
+ Subclass of `BaseManager` which supports a number of shared object types.
1199
+
1200
+ The types registered are those intended for the synchronization
1201
+ of threads, plus `dict`, `list` and `Namespace`.
1202
+
1203
+ The `multiprocessing.Manager()` function creates started instances of
1204
+ this class.
1205
+ '''
1206
+
1207
+ SyncManager.register('Queue', queue.Queue)
1208
+ SyncManager.register('JoinableQueue', queue.Queue)
1209
+ SyncManager.register('Event', threading.Event, EventProxy)
1210
+ SyncManager.register('Lock', threading.Lock, AcquirerProxy)
1211
+ SyncManager.register('RLock', threading.RLock, AcquirerProxy)
1212
+ SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy)
1213
+ SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore,
1214
+ AcquirerProxy)
1215
+ SyncManager.register('Condition', threading.Condition, ConditionProxy)
1216
+ SyncManager.register('Barrier', threading.Barrier, BarrierProxy)
1217
+ SyncManager.register('Pool', pool.Pool, PoolProxy)
1218
+ SyncManager.register('list', list, ListProxy)
1219
+ SyncManager.register('dict', dict, DictProxy)
1220
+ SyncManager.register('Value', Value, ValueProxy)
1221
+ SyncManager.register('Array', Array, ArrayProxy)
1222
+ SyncManager.register('Namespace', Namespace, NamespaceProxy)
1223
+
1224
+ # types returned by methods of PoolProxy
1225
+ SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False)
1226
+ SyncManager.register('AsyncResult', create_method=False)
1227
+
1228
+ #
1229
+ # Definition of SharedMemoryManager and SharedMemoryServer
1230
+ #
1231
+
1232
+ if HAS_SHMEM:
1233
+ class _SharedMemoryTracker:
1234
+ "Manages one or more shared memory segments."
1235
+
1236
+ def __init__(self, name, segment_names=[]):
1237
+ self.shared_memory_context_name = name
1238
+ self.segment_names = segment_names
1239
+
1240
+ def register_segment(self, segment_name):
1241
+ "Adds the supplied shared memory block name to tracker."
1242
+ util.debug(f"Register segment {segment_name!r} in pid {getpid()}")
1243
+ self.segment_names.append(segment_name)
1244
+
1245
+ def destroy_segment(self, segment_name):
1246
+ """Calls unlink() on the shared memory block with the supplied name
1247
+ and removes it from the list of blocks being tracked."""
1248
+ util.debug(f"Destroy segment {segment_name!r} in pid {getpid()}")
1249
+ self.segment_names.remove(segment_name)
1250
+ segment = shared_memory.SharedMemory(segment_name)
1251
+ segment.close()
1252
+ segment.unlink()
1253
+
1254
+ def unlink(self):
1255
+ "Calls destroy_segment() on all tracked shared memory blocks."
1256
+ for segment_name in self.segment_names[:]:
1257
+ self.destroy_segment(segment_name)
1258
+
1259
+ def __del__(self):
1260
+ util.debug(f"Call {self.__class__.__name__}.__del__ in {getpid()}")
1261
+ self.unlink()
1262
+
1263
+ def __getstate__(self):
1264
+ return (self.shared_memory_context_name, self.segment_names)
1265
+
1266
+ def __setstate__(self, state):
1267
+ self.__init__(*state)
1268
+
1269
+
1270
+ class SharedMemoryServer(Server):
1271
+
1272
+ public = Server.public + \
1273
+ ['track_segment', 'release_segment', 'list_segments']
1274
+
1275
+ def __init__(self, *args, **kwargs):
1276
+ Server.__init__(self, *args, **kwargs)
1277
+ address = self.address
1278
+ # The address of Linux abstract namespaces can be bytes
1279
+ if isinstance(address, bytes):
1280
+ address = os.fsdecode(address)
1281
+ self.shared_memory_context = \
1282
+ _SharedMemoryTracker(f"shm_{address}_{getpid()}")
1283
+ util.debug(f"SharedMemoryServer started by pid {getpid()}")
1284
+
1285
+ def create(self, c, typeid, /, *args, **kwargs):
1286
+ """Create a new distributed-shared object (not backed by a shared
1287
+ memory block) and return its id to be used in a Proxy Object."""
1288
+ # Unless set up as a shared proxy, don't make shared_memory_context
1289
+ # a standard part of kwargs. This makes things easier for supplying
1290
+ # simple functions.
1291
+ if hasattr(self.registry[typeid][-1], "_shared_memory_proxy"):
1292
+ kwargs['shared_memory_context'] = self.shared_memory_context
1293
+ return Server.create(self, c, typeid, *args, **kwargs)
1294
+
1295
+ def shutdown(self, c):
1296
+ "Call unlink() on all tracked shared memory, terminate the Server."
1297
+ self.shared_memory_context.unlink()
1298
+ return Server.shutdown(self, c)
1299
+
1300
+ def track_segment(self, c, segment_name):
1301
+ "Adds the supplied shared memory block name to Server's tracker."
1302
+ self.shared_memory_context.register_segment(segment_name)
1303
+
1304
+ def release_segment(self, c, segment_name):
1305
+ """Calls unlink() on the shared memory block with the supplied name
1306
+ and removes it from the tracker instance inside the Server."""
1307
+ self.shared_memory_context.destroy_segment(segment_name)
1308
+
1309
+ def list_segments(self, c):
1310
+ """Returns a list of names of shared memory blocks that the Server
1311
+ is currently tracking."""
1312
+ return self.shared_memory_context.segment_names
1313
+
1314
+
1315
+ class SharedMemoryManager(BaseManager):
1316
+ """Like SyncManager but uses SharedMemoryServer instead of Server.
1317
+
1318
+ It provides methods for creating and returning SharedMemory instances
1319
+ and for creating a list-like object (ShareableList) backed by shared
1320
+ memory. It also provides methods that create and return Proxy Objects
1321
+ that support synchronization across processes (i.e. multi-process-safe
1322
+ locks and semaphores).
1323
+ """
1324
+
1325
+ _Server = SharedMemoryServer
1326
+
1327
+ def __init__(self, *args, **kwargs):
1328
+ if os.name == "posix":
1329
+ # bpo-36867: Ensure the resource_tracker is running before
1330
+ # launching the manager process, so that concurrent
1331
+ # shared_memory manipulation both in the manager and in the
1332
+ # current process does not create two resource_tracker
1333
+ # processes.
1334
+ from . import resource_tracker
1335
+ resource_tracker.ensure_running()
1336
+ BaseManager.__init__(self, *args, **kwargs)
1337
+ util.debug(f"{self.__class__.__name__} created by pid {getpid()}")
1338
+
1339
+ def __del__(self):
1340
+ util.debug(f"{self.__class__.__name__}.__del__ by pid {getpid()}")
1341
+ pass
1342
+
1343
+ def get_server(self):
1344
+ 'Better than monkeypatching for now; merge into Server ultimately'
1345
+ if self._state.value != State.INITIAL:
1346
+ if self._state.value == State.STARTED:
1347
+ raise ProcessError("Already started SharedMemoryServer")
1348
+ elif self._state.value == State.SHUTDOWN:
1349
+ raise ProcessError("SharedMemoryManager has shut down")
1350
+ else:
1351
+ raise ProcessError(
1352
+ "Unknown state {!r}".format(self._state.value))
1353
+ return self._Server(self._registry, self._address,
1354
+ self._authkey, self._serializer)
1355
+
1356
+ def SharedMemory(self, size):
1357
+ """Returns a new SharedMemory instance with the specified size in
1358
+ bytes, to be tracked by the manager."""
1359
+ with self._Client(self._address, authkey=self._authkey) as conn:
1360
+ sms = shared_memory.SharedMemory(None, create=True, size=size)
1361
+ try:
1362
+ dispatch(conn, None, 'track_segment', (sms.name,))
1363
+ except BaseException as e:
1364
+ sms.unlink()
1365
+ raise e
1366
+ return sms
1367
+
1368
+ def ShareableList(self, sequence):
1369
+ """Returns a new ShareableList instance populated with the values
1370
+ from the input sequence, to be tracked by the manager."""
1371
+ with self._Client(self._address, authkey=self._authkey) as conn:
1372
+ sl = shared_memory.ShareableList(sequence)
1373
+ try:
1374
+ dispatch(conn, None, 'track_segment', (sl.shm.name,))
1375
+ except BaseException as e:
1376
+ sl.shm.unlink()
1377
+ raise e
1378
+ return sl
llava/lib/python3.10/multiprocessing/popen_forkserver.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import io
2
+ import os
3
+
4
+ from .context import reduction, set_spawning_popen
5
+ if not reduction.HAVE_SEND_HANDLE:
6
+ raise ImportError('No support for sending fds between processes')
7
+ from . import forkserver
8
+ from . import popen_fork
9
+ from . import spawn
10
+ from . import util
11
+
12
+
13
+ __all__ = ['Popen']
14
+
15
+ #
16
+ # Wrapper for an fd used while launching a process
17
+ #
18
+
19
+ class _DupFd(object):
20
+ def __init__(self, ind):
21
+ self.ind = ind
22
+ def detach(self):
23
+ return forkserver.get_inherited_fds()[self.ind]
24
+
25
+ #
26
+ # Start child process using a server process
27
+ #
28
+
29
+ class Popen(popen_fork.Popen):
30
+ method = 'forkserver'
31
+ DupFd = _DupFd
32
+
33
+ def __init__(self, process_obj):
34
+ self._fds = []
35
+ super().__init__(process_obj)
36
+
37
+ def duplicate_for_child(self, fd):
38
+ self._fds.append(fd)
39
+ return len(self._fds) - 1
40
+
41
+ def _launch(self, process_obj):
42
+ prep_data = spawn.get_preparation_data(process_obj._name)
43
+ buf = io.BytesIO()
44
+ set_spawning_popen(self)
45
+ try:
46
+ reduction.dump(prep_data, buf)
47
+ reduction.dump(process_obj, buf)
48
+ finally:
49
+ set_spawning_popen(None)
50
+
51
+ self.sentinel, w = forkserver.connect_to_new_process(self._fds)
52
+ # Keep a duplicate of the data pipe's write end as a sentinel of the
53
+ # parent process used by the child process.
54
+ _parent_w = os.dup(w)
55
+ self.finalizer = util.Finalize(self, util.close_fds,
56
+ (_parent_w, self.sentinel))
57
+ with open(w, 'wb', closefd=True) as f:
58
+ f.write(buf.getbuffer())
59
+ self.pid = forkserver.read_signed(self.sentinel)
60
+
61
+ def poll(self, flag=os.WNOHANG):
62
+ if self.returncode is None:
63
+ from multiprocessing.connection import wait
64
+ timeout = 0 if flag == os.WNOHANG else None
65
+ if not wait([self.sentinel], timeout):
66
+ return None
67
+ try:
68
+ self.returncode = forkserver.read_signed(self.sentinel)
69
+ except (OSError, EOFError):
70
+ # This should not happen usually, but perhaps the forkserver
71
+ # process itself got killed
72
+ self.returncode = 255
73
+
74
+ return self.returncode
llava/lib/python3.10/multiprocessing/popen_spawn_posix.py ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import io
2
+ import os
3
+
4
+ from .context import reduction, set_spawning_popen
5
+ from . import popen_fork
6
+ from . import spawn
7
+ from . import util
8
+
9
+ __all__ = ['Popen']
10
+
11
+
12
+ #
13
+ # Wrapper for an fd used while launching a process
14
+ #
15
+
16
+ class _DupFd(object):
17
+ def __init__(self, fd):
18
+ self.fd = fd
19
+ def detach(self):
20
+ return self.fd
21
+
22
+ #
23
+ # Start child process using a fresh interpreter
24
+ #
25
+
26
+ class Popen(popen_fork.Popen):
27
+ method = 'spawn'
28
+ DupFd = _DupFd
29
+
30
+ def __init__(self, process_obj):
31
+ self._fds = []
32
+ super().__init__(process_obj)
33
+
34
+ def duplicate_for_child(self, fd):
35
+ self._fds.append(fd)
36
+ return fd
37
+
38
+ def _launch(self, process_obj):
39
+ from . import resource_tracker
40
+ tracker_fd = resource_tracker.getfd()
41
+ self._fds.append(tracker_fd)
42
+ prep_data = spawn.get_preparation_data(process_obj._name)
43
+ fp = io.BytesIO()
44
+ set_spawning_popen(self)
45
+ try:
46
+ reduction.dump(prep_data, fp)
47
+ reduction.dump(process_obj, fp)
48
+ finally:
49
+ set_spawning_popen(None)
50
+
51
+ parent_r = child_w = child_r = parent_w = None
52
+ try:
53
+ parent_r, child_w = os.pipe()
54
+ child_r, parent_w = os.pipe()
55
+ cmd = spawn.get_command_line(tracker_fd=tracker_fd,
56
+ pipe_handle=child_r)
57
+ self._fds.extend([child_r, child_w])
58
+ self.pid = util.spawnv_passfds(spawn.get_executable(),
59
+ cmd, self._fds)
60
+ self.sentinel = parent_r
61
+ with open(parent_w, 'wb', closefd=False) as f:
62
+ f.write(fp.getbuffer())
63
+ finally:
64
+ fds_to_close = []
65
+ for fd in (parent_r, parent_w):
66
+ if fd is not None:
67
+ fds_to_close.append(fd)
68
+ self.finalizer = util.Finalize(self, util.close_fds, fds_to_close)
69
+
70
+ for fd in (child_r, child_w):
71
+ if fd is not None:
72
+ os.close(fd)
llava/lib/python3.10/multiprocessing/reduction.py ADDED
@@ -0,0 +1,281 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # Module which deals with pickling of objects.
3
+ #
4
+ # multiprocessing/reduction.py
5
+ #
6
+ # Copyright (c) 2006-2008, R Oudkerk
7
+ # Licensed to PSF under a Contributor Agreement.
8
+ #
9
+
10
+ from abc import ABCMeta
11
+ import copyreg
12
+ import functools
13
+ import io
14
+ import os
15
+ import pickle
16
+ import socket
17
+ import sys
18
+
19
+ from . import context
20
+
21
+ __all__ = ['send_handle', 'recv_handle', 'ForkingPickler', 'register', 'dump']
22
+
23
+
24
# True when file descriptors/handles can be transferred between processes:
# always on Windows (handle duplication), and on Unix only when SCM_RIGHTS
# ancillary data via sendmsg() is available.
HAVE_SEND_HANDLE = (sys.platform == 'win32' or
                    (hasattr(socket, 'CMSG_LEN') and
                     hasattr(socket, 'SCM_RIGHTS') and
                     hasattr(socket.socket, 'sendmsg')))
28
+
29
+ #
30
+ # Pickler subclass
31
+ #
32
+
33
class ForkingPickler(pickle.Pickler):
    '''Pickler subclass used by multiprocessing.'''
    # Reducers installed via register(); shared across every instance.
    _extra_reducers = {}
    # The global copyreg table, used as the base of each dispatch table.
    _copyreg_dispatch_table = copyreg.dispatch_table

    def __init__(self, *args):
        super().__init__(*args)
        # Per-instance dispatch table: copyreg reducers first, with the
        # multiprocessing-specific reducers layered on top.
        table = self._copyreg_dispatch_table.copy()
        table.update(self._extra_reducers)
        self.dispatch_table = table

    @classmethod
    def register(cls, type, reduce):
        '''Register a reduce function for a type.'''
        cls._extra_reducers[type] = reduce

    @classmethod
    def dumps(cls, obj, protocol=None):
        '''Pickle obj and return the serialized bytes as a memoryview.'''
        stream = io.BytesIO()
        cls(stream, protocol).dump(obj)
        return stream.getbuffer()

    loads = pickle.loads
55
+
56
# Module-level alias: register(type, reduce) installs a reducer on
# ForkingPickler for all subsequent picklings in this process.
register = ForkingPickler.register

def dump(obj, file, protocol=None):
    '''Replacement for pickle.dump() using ForkingPickler.'''
    ForkingPickler(file, protocol).dump(obj)
61
+
62
+ #
63
+ # Platform specific definitions
64
+ #
65
+
66
if sys.platform == 'win32':
    # Windows: handles are moved between processes via DuplicateHandle.
    __all__ += ['DupHandle', 'duplicate', 'steal_handle']
    import _winapi

    def duplicate(handle, target_process=None, inheritable=False,
                  *, source_process=None):
        '''Duplicate a handle. (target_process is a handle not a pid!)'''
        current_process = _winapi.GetCurrentProcess()
        if source_process is None:
            source_process = current_process
        if target_process is None:
            target_process = current_process
        return _winapi.DuplicateHandle(
            source_process, handle, target_process,
            0, inheritable, _winapi.DUPLICATE_SAME_ACCESS)

    def steal_handle(source_pid, handle):
        '''Steal a handle from process identified by source_pid.'''
        # DUPLICATE_CLOSE_SOURCE closes the handle in the source process,
        # transferring (not sharing) ownership to us.
        source_process_handle = _winapi.OpenProcess(
            _winapi.PROCESS_DUP_HANDLE, False, source_pid)
        try:
            return _winapi.DuplicateHandle(
                source_process_handle, handle,
                _winapi.GetCurrentProcess(), 0, False,
                _winapi.DUPLICATE_SAME_ACCESS | _winapi.DUPLICATE_CLOSE_SOURCE)
        finally:
            _winapi.CloseHandle(source_process_handle)

    def send_handle(conn, handle, destination_pid):
        '''Send a handle over a local connection.'''
        dh = DupHandle(handle, _winapi.DUPLICATE_SAME_ACCESS, destination_pid)
        conn.send(dh)

    def recv_handle(conn):
        '''Receive a handle over a local connection.'''
        return conn.recv().detach()

    class DupHandle(object):
        '''Picklable wrapper for a handle.'''
        def __init__(self, handle, access, pid=None):
            if pid is None:
                # We just duplicate the handle in the current process and
                # let the receiving process steal the handle.
                pid = os.getpid()
            proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, pid)
            try:
                # Duplicate into the *destination* process so the receiver
                # can claim it without further cooperation from us.
                self._handle = _winapi.DuplicateHandle(
                    _winapi.GetCurrentProcess(),
                    handle, proc, access, False, 0)
            finally:
                _winapi.CloseHandle(proc)
            self._access = access
            self._pid = pid

        def detach(self):
            '''Get the handle. This should only be called once.'''
            # retrieve handle from process which currently owns it
            if self._pid == os.getpid():
                # The handle has already been duplicated for this process.
                return self._handle
            # We must steal the handle from the process whose pid is self._pid.
            proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False,
                                       self._pid)
            try:
                return _winapi.DuplicateHandle(
                    proc, self._handle, _winapi.GetCurrentProcess(),
                    self._access, False, _winapi.DUPLICATE_CLOSE_SOURCE)
            finally:
                _winapi.CloseHandle(proc)

else:
    # Unix: fds travel as SCM_RIGHTS ancillary data on AF_UNIX sockets.
    __all__ += ['DupFd', 'sendfds', 'recvfds']
    import array

    # On MacOSX we should acknowledge receipt of fds -- see Issue14669
    ACKNOWLEDGE = sys.platform == 'darwin'

    def sendfds(sock, fds):
        '''Send an array of fds over an AF_UNIX socket.'''
        fds = array.array('i', fds)
        # One-byte payload carries len(fds) mod 256 so the receiver can
        # sanity-check the count it actually got.
        msg = bytes([len(fds) % 256])
        sock.sendmsg([msg], [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fds)])
        if ACKNOWLEDGE and sock.recv(1) != b'A':
            raise RuntimeError('did not receive acknowledgement of fd')

    def recvfds(sock, size):
        '''Receive an array of fds over an AF_UNIX socket.'''
        a = array.array('i')
        bytes_size = a.itemsize * size
        msg, ancdata, flags, addr = sock.recvmsg(1, socket.CMSG_SPACE(bytes_size))
        if not msg and not ancdata:
            raise EOFError
        try:
            if ACKNOWLEDGE:
                sock.send(b'A')
            if len(ancdata) != 1:
                raise RuntimeError('received %d items of ancdata' %
                                   len(ancdata))
            cmsg_level, cmsg_type, cmsg_data = ancdata[0]
            if (cmsg_level == socket.SOL_SOCKET and
                cmsg_type == socket.SCM_RIGHTS):
                if len(cmsg_data) % a.itemsize != 0:
                    raise ValueError
                a.frombytes(cmsg_data)
                # Cross-check against the one-byte count sent by sendfds().
                if len(a) % 256 != msg[0]:
                    raise AssertionError(
                        "Len is {0:n} but msg[0] is {1!r}".format(
                            len(a), msg[0]))
                return list(a)
        except (ValueError, IndexError):
            pass
        raise RuntimeError('Invalid data received')

    def send_handle(conn, handle, destination_pid):
        '''Send a handle over a local connection.'''
        # Wrap the connection's fd in a socket object so sendmsg() is usable.
        with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s:
            sendfds(s, [handle])

    def recv_handle(conn):
        '''Receive a handle over a local connection.'''
        with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s:
            return recvfds(s, 1)[0]

    def DupFd(fd):
        '''Return a wrapper for an fd.'''
        # While spawning, route the fd through the Popen being constructed;
        # otherwise fall back to the resource-sharer background thread.
        popen_obj = context.get_spawning_popen()
        if popen_obj is not None:
            return popen_obj.DupFd(popen_obj.duplicate_for_child(fd))
        elif HAVE_SEND_HANDLE:
            from . import resource_sharer
            return resource_sharer.DupFd(fd)
        else:
            raise ValueError('SCM_RIGHTS appears not to be available')
201
+
202
+ #
203
+ # Try making some callable types picklable
204
+ #
205
+
206
def _reduce_method(m):
    # Reduce a method to (getattr, (owner, name)) so unpickling re-fetches
    # it by name instead of trying to pickle the underlying function.
    if m.__self__ is None:
        return getattr, (m.__class__, m.__func__.__name__)
    else:
        return getattr, (m.__self__, m.__func__.__name__)
class _C:
    # Throwaway class used only to obtain the concrete bound-method type
    # for the register() call below.
    def f(self):
        pass
register(type(_C().f), _reduce_method)
215
+
216
+
217
def _reduce_method_descriptor(m):
    # Built-in method descriptors (e.g. list.append) are re-fetched by name
    # from their defining class when unpickled.
    return getattr, (m.__objclass__, m.__name__)
register(type(list.append), _reduce_method_descriptor)  # method descriptor
register(type(int.__add__), _reduce_method_descriptor)  # wrapper descriptor
221
+
222
+
223
+ def _reduce_partial(p):
224
+ return _rebuild_partial, (p.func, p.args, p.keywords or {})
225
+ def _rebuild_partial(func, args, keywords):
226
+ return functools.partial(func, *args, **keywords)
227
+ register(functools.partial, _reduce_partial)
228
+
229
+ #
230
+ # Make sockets picklable
231
+ #
232
+
233
if sys.platform == 'win32':
    def _reduce_socket(s):
        # Windows: share the socket via WSADuplicateSocket, mediated by the
        # resource-sharer background thread.
        from .resource_sharer import DupSocket
        return _rebuild_socket, (DupSocket(s),)
    def _rebuild_socket(ds):
        return ds.detach()
    register(socket.socket, _reduce_socket)

else:
    def _reduce_socket(s):
        # Unix: ship the underlying fd (via SCM_RIGHTS or the spawning
        # Popen) together with the parameters needed to rewrap it.
        df = DupFd(s.fileno())
        return _rebuild_socket, (df, s.family, s.type, s.proto)
    def _rebuild_socket(df, family, type, proto):
        fd = df.detach()
        return socket.socket(family, type, proto, fileno=fd)
    register(socket.socket, _reduce_socket)
249
+
250
+
251
class AbstractReducer(metaclass=ABCMeta):
    '''Abstract base class for use in implementing a Reduction class
    suitable for use in replacing the standard reduction mechanism
    used in multiprocessing.'''
    # Re-export the module-level machinery as class attributes so a
    # replacement reducer only needs to override what it changes.
    ForkingPickler = ForkingPickler
    register = register
    dump = dump
    send_handle = send_handle
    recv_handle = recv_handle

    if sys.platform == 'win32':
        steal_handle = steal_handle
        duplicate = duplicate
        DupHandle = DupHandle
    else:
        sendfds = sendfds
        recvfds = recvfds
        DupFd = DupFd

    _reduce_method = _reduce_method
    _reduce_method_descriptor = _reduce_method_descriptor
    _rebuild_partial = _rebuild_partial
    _reduce_socket = _reduce_socket
    _rebuild_socket = _rebuild_socket

    def __init__(self, *args):
        # Re-register the standard reducers; mirrors the module-level
        # register() calls performed at import time.
        register(type(_C().f), _reduce_method)
        register(type(list.append), _reduce_method_descriptor)
        register(type(int.__add__), _reduce_method_descriptor)
        register(functools.partial, _reduce_partial)
        register(socket.socket, _reduce_socket)
llava/lib/python3.10/multiprocessing/resource_sharer.py ADDED
@@ -0,0 +1,154 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #
2
+ # We use a background thread for sharing fds on Unix, and for sharing sockets on
3
+ # Windows.
4
+ #
5
+ # A client which wants to pickle a resource registers it with the resource
6
+ # sharer and gets an identifier in return. The unpickling process will connect
7
+ # to the resource sharer, sends the identifier and its pid, and then receives
8
+ # the resource.
9
+ #
10
+
11
+ import os
12
+ import signal
13
+ import socket
14
+ import sys
15
+ import threading
16
+
17
+ from . import process
18
+ from .context import reduction
19
+ from . import util
20
+
21
+ __all__ = ['stop']
22
+
23
+
24
if sys.platform == 'win32':
    __all__ += ['DupSocket']

    class DupSocket(object):
        '''Picklable wrapper for a socket.'''
        def __init__(self, sock):
            # Keep our own duplicate alive until some process detaches it.
            new_sock = sock.dup()
            def send(conn, pid):
                share = new_sock.share(pid)
                conn.send_bytes(share)
            self._id = _resource_sharer.register(send, new_sock.close)

        def detach(self):
            '''Get the socket. This should only be called once.'''
            # Ask the owning process's sharer thread for the share blob.
            with _resource_sharer.get_connection(self._id) as conn:
                share = conn.recv_bytes()
                return socket.fromshare(share)

else:
    __all__ += ['DupFd']

    class DupFd(object):
        '''Wrapper for fd which can be used at any time.'''
        def __init__(self, fd):
            # Duplicate so the wrapper stays valid even if the caller
            # closes the original fd.
            new_fd = os.dup(fd)
            def send(conn, pid):
                reduction.send_handle(conn, new_fd, pid)
            def close():
                os.close(new_fd)
            self._id = _resource_sharer.register(send, close)

        def detach(self):
            '''Get the fd. This should only be called once.'''
            with _resource_sharer.get_connection(self._id) as conn:
                return reduction.recv_handle(conn)
+
60
+
61
class _ResourceSharer(object):
    '''Manager for resources using background thread.'''
    def __init__(self):
        # Monotonically increasing key for registered resources.
        self._key = 0
        # key -> (send, close) callbacks for resources awaiting pickup.
        self._cache = {}
        self._lock = threading.Lock()
        self._listener = None
        self._address = None
        self._thread = None
        # A forked child must not serve the parent's resources; reset state.
        util.register_after_fork(self, _ResourceSharer._afterfork)

    def register(self, send, close):
        '''Register resource, returning an identifier.'''
        with self._lock:
            # The listener/thread are started lazily on first use.
            if self._address is None:
                self._start()
            self._key += 1
            self._cache[self._key] = (send, close)
            return (self._address, self._key)

    @staticmethod
    def get_connection(ident):
        '''Return connection from which to receive identified resource.'''
        from .connection import Client
        address, key = ident
        c = Client(address, authkey=process.current_process().authkey)
        # The server expects (key, pid) and then pushes the resource back.
        c.send((key, os.getpid()))
        return c

    def stop(self, timeout=None):
        '''Stop the background thread and clear registered resources.'''
        from .connection import Client
        with self._lock:
            if self._address is not None:
                # Sending None tells _serve() to exit its accept loop.
                c = Client(self._address,
                           authkey=process.current_process().authkey)
                c.send(None)
                c.close()
                self._thread.join(timeout)
                if self._thread.is_alive():
                    util.sub_warning('_ResourceSharer thread did '
                                     'not stop when asked')
                self._listener.close()
                self._thread = None
                self._address = None
                self._listener = None
                # Undelivered resources are released via their close hooks.
                for key, (send, close) in self._cache.items():
                    close()
                self._cache.clear()

    def _afterfork(self):
        # Called in the child after fork: drop inherited resources and
        # re-initialize synchronization state for this process.
        for key, (send, close) in self._cache.items():
            close()
        self._cache.clear()
        self._lock._at_fork_reinit()
        if self._listener is not None:
            self._listener.close()
        self._listener = None
        self._address = None
        self._thread = None

    def _start(self):
        # Caller must hold self._lock.
        from .connection import Listener
        assert self._listener is None, "Already have Listener"
        util.debug('starting listener and thread for sending handles')
        self._listener = Listener(authkey=process.current_process().authkey)
        self._address = self._listener.address
        t = threading.Thread(target=self._serve)
        t.daemon = True
        t.start()
        self._thread = t

    def _serve(self):
        # Block all signals in this service thread so they are delivered to
        # the main thread instead.
        if hasattr(signal, 'pthread_sigmask'):
            signal.pthread_sigmask(signal.SIG_BLOCK, signal.valid_signals())
        while 1:
            try:
                with self._listener.accept() as conn:
                    msg = conn.recv()
                    if msg is None:
                        # Shutdown request from stop().
                        break
                    key, destination_pid = msg
                    # Each resource can be delivered exactly once.
                    send, close = self._cache.pop(key)
                    try:
                        send(conn, destination_pid)
                    finally:
                        close()
            except:
                # Keep serving after per-connection failures, but stay quiet
                # during interpreter shutdown.
                if not util.is_exiting():
                    sys.excepthook(*sys.exc_info())
+
152
+
153
+ _resource_sharer = _ResourceSharer()
154
+ stop = _resource_sharer.stop
llava/lib/python3.10/multiprocessing/shared_memory.py ADDED
@@ -0,0 +1,534 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Provides shared memory for direct access across processes.
2
+
3
+ The API of this package is currently provisional. Refer to the
4
+ documentation for details.
5
+ """
6
+
7
+
8
+ __all__ = [ 'SharedMemory', 'ShareableList' ]
9
+
10
+
11
+ from functools import partial
12
+ import mmap
13
+ import os
14
+ import errno
15
+ import struct
16
+ import secrets
17
+ import types
18
+
19
+ if os.name == "nt":
20
+ import _winapi
21
+ _USE_POSIX = False
22
+ else:
23
+ import _posixshmem
24
+ _USE_POSIX = True
25
+
26
+ from . import resource_tracker
27
+
28
+ _O_CREX = os.O_CREAT | os.O_EXCL
29
+
30
+ # FreeBSD (and perhaps other BSDs) limit names to 14 characters.
31
+ _SHM_SAFE_NAME_LENGTH = 14
32
+
33
+ # Shared memory block name prefix
34
+ if _USE_POSIX:
35
+ _SHM_NAME_PREFIX = '/psm_'
36
+ else:
37
+ _SHM_NAME_PREFIX = 'wnsm_'
38
+
39
+
40
def _make_filename():
    "Create a random filename for the shared memory object."
    # Each random byte renders as two hex characters, so budget half of
    # whatever space remains after the platform-specific prefix.
    nbytes = (_SHM_SAFE_NAME_LENGTH - len(_SHM_NAME_PREFIX)) // 2
    assert nbytes >= 2, '_SHM_NAME_PREFIX too long'
    suffix = secrets.token_hex(nbytes)
    name = _SHM_NAME_PREFIX + suffix
    assert len(name) <= _SHM_SAFE_NAME_LENGTH
    return name
48
+
49
+
50
class SharedMemory:
    """Creates a new shared memory block or attaches to an existing
    shared memory block.

    Every shared memory block is assigned a unique name.  This enables
    one process to create a shared memory block with a particular name
    so that a different process can attach to that same shared memory
    block using that same name.

    As a resource for sharing data across processes, shared memory blocks
    may outlive the original process that created them.  When one process
    no longer needs access to a shared memory block that might still be
    needed by other processes, the close() method should be called.
    When a shared memory block is no longer needed by any process, the
    unlink() method should be called to ensure proper cleanup."""

    # Defaults; enables close() and unlink() to run without errors.
    _name = None
    _fd = -1
    _mmap = None
    _buf = None
    _flags = os.O_RDWR
    _mode = 0o600
    # POSIX shm names conventionally begin with '/'; the public `name`
    # property strips it back off.
    _prepend_leading_slash = True if _USE_POSIX else False

    def __init__(self, name=None, create=False, size=0):
        if not size >= 0:
            raise ValueError("'size' must be a positive integer")
        if create:
            # O_CREAT|O_EXCL: creation must not silently attach to an
            # existing block.
            self._flags = _O_CREX | os.O_RDWR
            if size == 0:
                raise ValueError("'size' must be a positive number different from zero")
        if name is None and not self._flags & os.O_EXCL:
            raise ValueError("'name' can only be None if create=True")

        if _USE_POSIX:

            # POSIX Shared Memory

            if name is None:
                # Retry with fresh random names until one is unused.
                while True:
                    name = _make_filename()
                    try:
                        self._fd = _posixshmem.shm_open(
                            name,
                            self._flags,
                            mode=self._mode
                        )
                    except FileExistsError:
                        continue
                    self._name = name
                    break
            else:
                name = "/" + name if self._prepend_leading_slash else name
                self._fd = _posixshmem.shm_open(
                    name,
                    self._flags,
                    mode=self._mode
                )
                self._name = name
            try:
                if create and size:
                    os.ftruncate(self._fd, size)
                # Attaching: adopt the block's actual size from the fd.
                stats = os.fstat(self._fd)
                size = stats.st_size
                self._mmap = mmap.mmap(self._fd, size)
            except OSError:
                self.unlink()
                raise

            # Let the resource tracker clean up leaked blocks at exit.
            resource_tracker.register(self._name, "shared_memory")

        else:

            # Windows Named Shared Memory

            if create:
                while True:
                    temp_name = _make_filename() if name is None else name
                    # Create and reserve shared memory block with this name
                    # until it can be attached to by mmap.
                    h_map = _winapi.CreateFileMapping(
                        _winapi.INVALID_HANDLE_VALUE,
                        _winapi.NULL,
                        _winapi.PAGE_READWRITE,
                        (size >> 32) & 0xFFFFFFFF,
                        size & 0xFFFFFFFF,
                        temp_name
                    )
                    try:
                        last_error_code = _winapi.GetLastError()
                        if last_error_code == _winapi.ERROR_ALREADY_EXISTS:
                            if name is not None:
                                # Explicit name collision is an error...
                                raise FileExistsError(
                                    errno.EEXIST,
                                    os.strerror(errno.EEXIST),
                                    name,
                                    _winapi.ERROR_ALREADY_EXISTS
                                )
                            else:
                                # ...but a random-name collision just retries.
                                continue
                        self._mmap = mmap.mmap(-1, size, tagname=temp_name)
                    finally:
                        _winapi.CloseHandle(h_map)
                    self._name = temp_name
                    break

            else:
                self._name = name
                # Dynamically determine the existing named shared memory
                # block's size which is likely a multiple of mmap.PAGESIZE.
                h_map = _winapi.OpenFileMapping(
                    _winapi.FILE_MAP_READ,
                    False,
                    name
                )
                try:
                    p_buf = _winapi.MapViewOfFile(
                        h_map,
                        _winapi.FILE_MAP_READ,
                        0,
                        0,
                        0
                    )
                finally:
                    _winapi.CloseHandle(h_map)
                try:
                    size = _winapi.VirtualQuerySize(p_buf)
                finally:
                    _winapi.UnmapViewOfFile(p_buf)
                self._mmap = mmap.mmap(-1, size, tagname=name)

        self._size = size
        self._buf = memoryview(self._mmap)

    def __del__(self):
        try:
            self.close()
        except OSError:
            pass

    def __reduce__(self):
        # Pickling attaches (create=False) to the same named block.
        return (
            self.__class__,
            (
                self.name,
                False,
                self.size,
            ),
        )

    def __repr__(self):
        return f'{self.__class__.__name__}({self.name!r}, size={self.size})'

    @property
    def buf(self):
        "A memoryview of contents of the shared memory block."
        return self._buf

    @property
    def name(self):
        "Unique name that identifies the shared memory block."
        reported_name = self._name
        if _USE_POSIX and self._prepend_leading_slash:
            if self._name.startswith("/"):
                reported_name = self._name[1:]
        return reported_name

    @property
    def size(self):
        "Size in bytes."
        return self._size

    def close(self):
        """Closes access to the shared memory from this instance but does
        not destroy the shared memory block."""
        # Release in dependency order: memoryview -> mmap -> fd.
        if self._buf is not None:
            self._buf.release()
            self._buf = None
        if self._mmap is not None:
            self._mmap.close()
            self._mmap = None
        if _USE_POSIX and self._fd >= 0:
            os.close(self._fd)
            self._fd = -1

    def unlink(self):
        """Requests that the underlying shared memory block be destroyed.

        In order to ensure proper cleanup of resources, unlink should be
        called once (and only once) across all processes which have access
        to the shared memory block."""
        # On Windows the last detaching process frees the block, so unlink
        # is only meaningful for POSIX.
        if _USE_POSIX and self._name:
            _posixshmem.shm_unlink(self._name)
            resource_tracker.unregister(self._name, "shared_memory")
245
+
246
+
247
# Text encoding used when (de)serializing str values in a ShareableList.
_encoding = "utf8"
248
+
249
class ShareableList:
    """Pattern for a mutable list-like object shareable via a shared
    memory block.  It differs from the built-in list type in that these
    lists can not change their overall length (i.e. no append, insert,
    etc.)

    Because values are packed into a memoryview as bytes, the struct
    packing format for any storable value must require no more than 8
    characters to describe its format."""

    # The shared memory area is organized as follows:
    # - 8 bytes: number of items (N) as a 64-bit integer
    # - (N + 1) * 8 bytes: offsets of each element from the start of the
    #   data area
    # - K bytes: the data area storing item values (with encoding and size
    #   depending on their respective types)
    # - N * 8 bytes: `struct` format string for each element
    # - N bytes: index into _back_transforms_mapping for each element
    #   (for reconstructing the corresponding Python value)
    _types_mapping = {
        int: "q",
        float: "d",
        bool: "xxxxxxx?",
        str: "%ds",
        bytes: "%ds",
        None.__class__: "xxxxxx?x",
    }
    # All fixed-size formats above occupy exactly 8 bytes; str/bytes are
    # padded up to a multiple of this alignment.
    _alignment = 8
    _back_transforms_mapping = {
        0: lambda value: value,                   # int, float, bool
        1: lambda value: value.rstrip(b'\x00').decode(_encoding),  # str
        2: lambda value: value.rstrip(b'\x00'),   # bytes
        3: lambda _value: None,                   # None
    }

    @staticmethod
    def _extract_recreation_code(value):
        """Used in concert with _back_transforms_mapping to convert values
        into the appropriate Python objects when retrieving them from
        the list as well as when storing them."""
        if not isinstance(value, (str, bytes, None.__class__)):
            return 0
        elif isinstance(value, str):
            return 1
        elif isinstance(value, bytes):
            return 2
        else:
            return 3  # NoneType

    def __init__(self, sequence=None, *, name=None):
        if name is None or sequence is not None:
            # Creating a brand-new list (possibly with a requested name).
            sequence = sequence or ()
            # Per-item struct format; str/bytes formats embed the padded
            # byte length.
            _formats = [
                self._types_mapping[type(item)]
                    if not isinstance(item, (str, bytes))
                    else self._types_mapping[type(item)] % (
                        self._alignment * (len(item) // self._alignment + 1),
                    )
                for item in sequence
            ]
            self._list_len = len(_formats)
            assert sum(len(fmt) <= 8 for fmt in _formats) == self._list_len
            offset = 0
            # The offsets of each list element into the shared memory's
            # data area (0 meaning the start of the data area, not the start
            # of the shared memory area).
            self._allocated_offsets = [0]
            for fmt in _formats:
                offset += self._alignment if fmt[-1] != "s" else int(fmt[:-1])
                self._allocated_offsets.append(offset)
            _recreation_codes = [
                self._extract_recreation_code(item) for item in sequence
            ]
            requested_size = struct.calcsize(
                "q" + self._format_size_metainfo +
                "".join(_formats) +
                self._format_packing_metainfo +
                self._format_back_transform_codes
            )

            self.shm = SharedMemory(name, create=True, size=requested_size)
        else:
            # Attaching to an existing list by name.
            self.shm = SharedMemory(name)

        if sequence is not None:
            # Populate the freshly created block: length + offsets, then
            # the packed values, then per-item formats and transform codes.
            _enc = _encoding
            struct.pack_into(
                "q" + self._format_size_metainfo,
                self.shm.buf,
                0,
                self._list_len,
                *(self._allocated_offsets)
            )
            struct.pack_into(
                "".join(_formats),
                self.shm.buf,
                self._offset_data_start,
                *(v.encode(_enc) if isinstance(v, str) else v for v in sequence)
            )
            struct.pack_into(
                self._format_packing_metainfo,
                self.shm.buf,
                self._offset_packing_formats,
                *(v.encode(_enc) for v in _formats)
            )
            struct.pack_into(
                self._format_back_transform_codes,
                self.shm.buf,
                self._offset_back_transform_codes,
                *(_recreation_codes)
            )

        else:
            # Attached: read the length and offsets back out of the block.
            self._list_len = len(self)  # Obtains size from offset 0 in buffer.
            self._allocated_offsets = list(
                struct.unpack_from(
                    self._format_size_metainfo,
                    self.shm.buf,
                    1 * 8
                )
            )

    def _get_packing_format(self, position):
        "Gets the packing format for a single value stored in the list."
        position = position if position >= 0 else position + self._list_len
        if (position >= self._list_len) or (self._list_len < 0):
            raise IndexError("Requested position out of range.")

        v = struct.unpack_from(
            "8s",
            self.shm.buf,
            self._offset_packing_formats + position * 8
        )[0]
        # Formats shorter than 8 bytes are NUL-padded in storage.
        fmt = v.rstrip(b'\x00')
        fmt_as_str = fmt.decode(_encoding)

        return fmt_as_str

    def _get_back_transform(self, position):
        "Gets the back transformation function for a single value."

        if (position >= self._list_len) or (self._list_len < 0):
            raise IndexError("Requested position out of range.")

        transform_code = struct.unpack_from(
            "b",
            self.shm.buf,
            self._offset_back_transform_codes + position
        )[0]
        transform_function = self._back_transforms_mapping[transform_code]

        return transform_function

    def _set_packing_format_and_transform(self, position, fmt_as_str, value):
        """Sets the packing format and back transformation code for a
        single value in the list at the specified position."""

        if (position >= self._list_len) or (self._list_len < 0):
            raise IndexError("Requested position out of range.")

        struct.pack_into(
            "8s",
            self.shm.buf,
            self._offset_packing_formats + position * 8,
            fmt_as_str.encode(_encoding)
        )

        transform_code = self._extract_recreation_code(value)
        struct.pack_into(
            "b",
            self.shm.buf,
            self._offset_back_transform_codes + position,
            transform_code
        )

    def __getitem__(self, position):
        # Support negative indexing like the built-in list.
        position = position if position >= 0 else position + self._list_len
        try:
            offset = self._offset_data_start + self._allocated_offsets[position]
            (v,) = struct.unpack_from(
                self._get_packing_format(position),
                self.shm.buf,
                offset
            )
        except IndexError:
            raise IndexError("index out of range")

        back_transform = self._get_back_transform(position)
        v = back_transform(v)

        return v

    def __setitem__(self, position, value):
        position = position if position >= 0 else position + self._list_len
        try:
            item_offset = self._allocated_offsets[position]
            offset = self._offset_data_start + item_offset
            current_format = self._get_packing_format(position)
        except IndexError:
            raise IndexError("assignment index out of range")

        if not isinstance(value, (str, bytes)):
            new_format = self._types_mapping[type(value)]
            encoded_value = value
        else:
            # New str/bytes must fit within the slot originally allocated
            # for this position; slots never grow.
            allocated_length = self._allocated_offsets[position + 1] - item_offset

            encoded_value = (value.encode(_encoding)
                             if isinstance(value, str) else value)
            if len(encoded_value) > allocated_length:
                raise ValueError("bytes/str item exceeds available storage")
            if current_format[-1] == "s":
                new_format = current_format
            else:
                new_format = self._types_mapping[str] % (
                    allocated_length,
                )

        self._set_packing_format_and_transform(
            position,
            new_format,
            value
        )
        struct.pack_into(new_format, self.shm.buf, offset, encoded_value)

    def __reduce__(self):
        # Pickling re-attaches by name rather than copying the data.
        return partial(self.__class__, name=self.shm.name), ()

    def __len__(self):
        return struct.unpack_from("q", self.shm.buf, 0)[0]

    def __repr__(self):
        return f'{self.__class__.__name__}({list(self)}, name={self.shm.name!r})'

    @property
    def format(self):
        "The struct packing format used by all currently stored items."
        return "".join(
            self._get_packing_format(i) for i in range(self._list_len)
        )

    @property
    def _format_size_metainfo(self):
        "The struct packing format used for the items' storage offsets."
        return "q" * (self._list_len + 1)

    @property
    def _format_packing_metainfo(self):
        "The struct packing format used for the items' packing formats."
        return "8s" * self._list_len

    @property
    def _format_back_transform_codes(self):
        "The struct packing format used for the items' back transforms."
        return "b" * self._list_len

    @property
    def _offset_data_start(self):
        # - 8 bytes for the list length
        # - (N + 1) * 8 bytes for the element offsets
        return (self._list_len + 2) * 8

    @property
    def _offset_packing_formats(self):
        # Packing formats follow the data area, whose total size is the
        # final entry in _allocated_offsets.
        return self._offset_data_start + self._allocated_offsets[-1]

    @property
    def _offset_back_transform_codes(self):
        return self._offset_packing_formats + self._list_len * 8

    def count(self, value):
        "L.count(value) -> integer -- return number of occurrences of value."

        return sum(value == entry for entry in self)

    def index(self, value):
        """L.index(value) -> integer -- return first index of value.
        Raises ValueError if the value is not present."""

        for position, entry in enumerate(self):
            if value == entry:
                return position
        else:
            raise ValueError(f"{value!r} not in this container")

    __class_getitem__ = classmethod(types.GenericAlias)
llava/lib/python3.10/tkinter/__pycache__/__init__.cpython-310.pyc ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b158d2aa89e34fa95e53e17d29794fb111f70a3d9b32e894074df6a257c5dd5c
3
+ size 169622
llava/lib/python3.10/tkinter/__pycache__/commondialog.cpython-310.pyc ADDED
Binary file (1.6 kB). View file
 
llava/lib/python3.10/tkinter/__pycache__/filedialog.cpython-310.pyc ADDED
Binary file (12.7 kB). View file
 
llava/lib/python3.10/tkinter/__pycache__/simpledialog.cpython-310.pyc ADDED
Binary file (10.9 kB). View file
 
llava/lib/python3.10/tkinter/__pycache__/tix.cpython-310.pyc ADDED
Binary file (73.7 kB). View file
 
llava/lib/python3.10/tkinter/__pycache__/ttk.cpython-310.pyc ADDED
Binary file (55.9 kB). View file
 
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool3d_native.h ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor & _adaptive_avg_pool3d_out_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out);
20
+ TORCH_API at::Tensor adaptive_avg_pool3d_cpu(const at::Tensor & self, at::IntArrayRef output_size);
21
+ TORCH_API at::Tensor adaptive_avg_pool3d_cuda(const at::Tensor & self, at::IntArrayRef output_size);
22
+ TORCH_API at::Tensor adaptive_avg_pool3d_quantized_cpu(const at::Tensor & self, at::IntArrayRef output_size);
23
+ } // namespace native
24
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_amp_update_scale_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautograd {
19
+
20
+ TORCH_API ::std::tuple<at::Tensor,at::Tensor> _amp_update_scale(const at::Tensor & self, const at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval);
21
+ TORCH_API at::Tensor & _amp_update_scale_out(at::Tensor & out, const at::Tensor & self, at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval);
22
+ TORCH_API at::Tensor & _amp_update_scale_outf(const at::Tensor & self, at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval, at::Tensor & out);
23
+
24
+ } // namespace compositeexplicitautograd
25
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_assert_async.h ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/_assert_async_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::_assert_async(Tensor self) -> ()
26
+ inline void _assert_async(const at::Tensor & self) {
27
+ return at::_ops::_assert_async::call(self);
28
+ }
29
+
30
+ // aten::_assert_async.msg(Tensor self, str assert_msg) -> ()
31
+ inline void _assert_async(const at::Tensor & self, c10::string_view assert_msg) {
32
+ return at::_ops::_assert_async_msg::call(self, assert_msg);
33
+ }
34
+
35
+ }
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_coalesce_ops.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API _coalesce {
18
+ using schema = at::Tensor (const at::Tensor &);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_coalesce")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_coalesce(Tensor self) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & self);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
26
+ };
27
+
28
+ struct TORCH_API _coalesce_out {
29
+ using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_coalesce")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_coalesce.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
35
+ static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
37
+ };
38
+
39
+ }} // namespace at::_ops
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_eigh_cuda_dispatch.h ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cuda {
19
+
20
+ TORCH_API ::std::tuple<at::Tensor,at::Tensor> _linalg_eigh(const at::Tensor & A, c10::string_view UPLO="L", bool compute_v=true);
21
+ TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> _linalg_eigh_out(at::Tensor & eigenvalues, at::Tensor & eigenvectors, const at::Tensor & A, c10::string_view UPLO="L", bool compute_v=true);
22
+ TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> _linalg_eigh_outf(const at::Tensor & A, c10::string_view UPLO, bool compute_v, at::Tensor & eigenvalues, at::Tensor & eigenvectors);
23
+
24
+ } // namespace cuda
25
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_native_batch_norm_legit_no_training.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/_native_batch_norm_legit_no_training_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::_native_batch_norm_legit_no_training(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, float momentum, float eps) -> (Tensor, Tensor, Tensor)
26
+ inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit_no_training(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, double momentum, double eps) {
27
+ return at::_ops::_native_batch_norm_legit_no_training::call(input, weight, bias, running_mean, running_var, momentum, eps);
28
+ }
29
+
30
+ // aten::_native_batch_norm_legit_no_training.out(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, float momentum, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
31
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _native_batch_norm_legit_no_training_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, double momentum, double eps) {
32
+ return at::_ops::_native_batch_norm_legit_no_training_out::call(input, weight, bias, running_mean, running_var, momentum, eps, out0, out1, out2);
33
+ }
34
+ // aten::_native_batch_norm_legit_no_training.out(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, float momentum, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))
35
+ inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _native_batch_norm_legit_no_training_outf(const at::Tensor & input, const ::std::optional<at::Tensor> & weight, const ::std::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, double momentum, double eps, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2) {
36
+ return at::_ops::_native_batch_norm_legit_no_training_out::call(input, weight, bias, running_mean, running_var, momentum, eps, out0, out1, out2);
37
+ }
38
+
39
+ }
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_compute_contiguous_strides_offsets_ops.h ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API _nested_compute_contiguous_strides_offsets {
18
+ using schema = ::std::tuple<at::Tensor,at::Tensor> (const at::Tensor &);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_nested_compute_contiguous_strides_offsets")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_nested_compute_contiguous_strides_offsets(Tensor nested_size) -> (Tensor, Tensor)")
24
+ static ::std::tuple<at::Tensor,at::Tensor> call(const at::Tensor & nested_size);
25
+ static ::std::tuple<at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & nested_size);
26
+ };
27
+
28
+ }} // namespace at::_ops
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_cudnn_attention_backward.h ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/_scaled_dot_product_cudnn_attention_backward_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::_scaled_dot_product_cudnn_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, Tensor philox_seed, Tensor philox_offset, *, float? scale=None) -> (Tensor, Tensor, Tensor)
26
+ inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_cudnn_attention_backward(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, bool is_causal, const at::Tensor & philox_seed, const at::Tensor & philox_offset, ::std::optional<double> scale=::std::nullopt) {
27
+ return at::_ops::_scaled_dot_product_cudnn_attention_backward::call(grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset, scale);
28
+ }
29
+ namespace symint {
30
+ template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
31
+ ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_cudnn_attention_backward(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, bool is_causal, const at::Tensor & philox_seed, const at::Tensor & philox_offset, ::std::optional<double> scale=::std::nullopt) {
32
+ return at::_ops::_scaled_dot_product_cudnn_attention_backward::call(grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset, scale);
33
+ }
34
+ }
35
+
36
+ // aten::_scaled_dot_product_cudnn_attention_backward(Tensor grad_out, Tensor query, Tensor key, Tensor value, Tensor out, Tensor logsumexp, Tensor cum_seq_q, Tensor cum_seq_k, SymInt max_q, SymInt max_k, float dropout_p, bool is_causal, Tensor philox_seed, Tensor philox_offset, *, float? scale=None) -> (Tensor, Tensor, Tensor)
37
+ inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_cudnn_attention_backward_symint(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, const at::Tensor & philox_seed, const at::Tensor & philox_offset, ::std::optional<double> scale=::std::nullopt) {
38
+ return at::_ops::_scaled_dot_product_cudnn_attention_backward::call(grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset, scale);
39
+ }
40
+ namespace symint {
41
+ template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
42
+ ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_cudnn_attention_backward(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, const at::Tensor & philox_seed, const at::Tensor & philox_offset, ::std::optional<double> scale=::std::nullopt) {
43
+ return at::_ops::_scaled_dot_product_cudnn_attention_backward::call(grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset, scale);
44
+ }
45
+ }
46
+
47
+ }
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_flash_attention_for_cpu.h ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/_scaled_dot_product_flash_attention_for_cpu_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::_scaled_dot_product_flash_attention_for_cpu(Tensor query, Tensor key, Tensor value, float dropout_p=0.0, bool is_causal=False, *, Tensor? attn_mask=None, float? scale=None) -> (Tensor output, Tensor logsumexp)
26
+ inline ::std::tuple<at::Tensor,at::Tensor> _scaled_dot_product_flash_attention_for_cpu(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, double dropout_p=0.0, bool is_causal=false, const ::std::optional<at::Tensor> & attn_mask={}, ::std::optional<double> scale=::std::nullopt) {
27
+ return at::_ops::_scaled_dot_product_flash_attention_for_cpu::call(query, key, value, dropout_p, is_causal, attn_mask, scale);
28
+ }
29
+
30
+ }
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_bsc_tensor_unsafe_native.h ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from NativeFunction.h
4
+
5
+ #include <c10/core/Scalar.h>
6
+ #include <c10/core/Storage.h>
7
+ #include <c10/core/TensorOptions.h>
8
+ #include <c10/util/Deprecated.h>
9
+ #include <c10/util/Optional.h>
10
+ #include <c10/core/QScheme.h>
11
+ #include <ATen/core/Reduction.h>
12
+ #include <ATen/core/Tensor.h>
13
+ #include <tuple>
14
+ #include <vector>
15
+
16
+
17
+ namespace at {
18
+ namespace native {
19
+ TORCH_API at::Tensor _sparse_bsc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={});
20
+ } // namespace native
21
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_csr_sum_ops.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API _sparse_csr_sum_dim_dtype {
18
+ using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, bool, ::std::optional<at::ScalarType>);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_sparse_csr_sum")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dim_dtype")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_sparse_csr_sum.dim_dtype(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype);
26
+ };
27
+
28
+ struct TORCH_API _sparse_csr_sum_dim_dtype_out {
29
+ using schema = at::Tensor & (const at::Tensor &, at::IntArrayRef, bool, ::std::optional<at::ScalarType>, at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_sparse_csr_sum")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dim_dtype_out")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_sparse_csr_sum.dim_dtype_out(Tensor self, int[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)")
35
+ static at::Tensor & call(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef dim, bool keepdim, ::std::optional<at::ScalarType> dtype, at::Tensor & out);
37
+ };
38
+
39
+ }} // namespace at::_ops
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_log_softmax_compositeimplicitautograd_dispatch.h ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeimplicitautograd {
19
+
20
+ TORCH_API at::Tensor _sparse_log_softmax(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype=::std::nullopt);
21
+ TORCH_API at::Tensor _sparse_log_softmax(const at::Tensor & self, at::Dimname dim, ::std::optional<at::ScalarType> dtype=::std::nullopt);
22
+
23
+ } // namespace compositeimplicitautograd
24
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_to_dense_compositeexplicitautograd_dispatch.h ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeexplicitautograd {
19
+
20
+ TORCH_API at::Tensor & _to_dense_out(at::Tensor & out, const at::Tensor & self, ::std::optional<at::ScalarType> dtype=::std::nullopt, ::std::optional<bool> masked_grad=::std::nullopt);
21
+ TORCH_API at::Tensor & _to_dense_outf(const at::Tensor & self, ::std::optional<at::ScalarType> dtype, ::std::optional<bool> masked_grad, at::Tensor & out);
22
+
23
+ } // namespace compositeexplicitautograd
24
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/abs_ops.h ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API abs {
18
+ using schema = at::Tensor (const at::Tensor &);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::abs")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "abs(Tensor self) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & self);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
26
+ };
27
+
28
+ struct TORCH_API abs_ {
29
+ using schema = at::Tensor & (at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::abs_")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "abs_(Tensor(a!) self) -> Tensor(a!)")
35
+ static at::Tensor & call(at::Tensor & self);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self);
37
+ };
38
+
39
+ struct TORCH_API abs_out {
40
+ using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
41
+ using ptr_schema = schema*;
42
+ // See Note [static constexpr char* members for windows NVCC]
43
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::abs")
44
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
45
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
46
+ static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
47
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
48
+ };
49
+
50
+ }} // namespace at::_ops
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/argmin_ops.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API argmin {
18
+ using schema = at::Tensor (const at::Tensor &, ::std::optional<int64_t>, bool);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::argmin")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "argmin(Tensor self, int? dim=None, bool keepdim=False) -> Tensor")
24
+ static at::Tensor call(const at::Tensor & self, ::std::optional<int64_t> dim, bool keepdim);
25
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<int64_t> dim, bool keepdim);
26
+ };
27
+
28
+ struct TORCH_API argmin_out {
29
+ using schema = at::Tensor & (const at::Tensor &, ::std::optional<int64_t>, bool, at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::argmin")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "argmin.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)")
35
+ static at::Tensor & call(const at::Tensor & self, ::std::optional<int64_t> dim, bool keepdim, at::Tensor & out);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, ::std::optional<int64_t> dim, bool keepdim, at::Tensor & out);
37
+ };
38
+
39
+ }} // namespace at::_ops
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/broadcast_to_compositeimplicitautograd_dispatch.h ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace compositeimplicitautograd {
19
+
20
+ TORCH_API at::Tensor broadcast_to(const at::Tensor & self, at::IntArrayRef size);
21
+ TORCH_API at::Tensor broadcast_to_symint(const at::Tensor & self, c10::SymIntArrayRef size);
22
+
23
+ } // namespace compositeimplicitautograd
24
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/conj_physical_cpu_dispatch.h ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+ // @generated by torchgen/gen.py from DispatchKeyFunction.h
3
+
4
+ // NB: The implementing C++ file is RegisterDispatchKey.cpp
5
+
6
+ // The only #includes we need are for custom classes that have defaults in the C++ API
7
+ #include <c10/core/MemoryFormat.h>
8
+ #include <c10/core/Scalar.h>
9
+ #include <ATen/core/Reduction.h>
10
+
11
+ // Forward declarations of any types needed in the operator signatures.
12
+ // We can't directly include these classes because it will cause circular include dependencies.
13
+ // This file is included by TensorBody.h, which defines the Tensor class.
14
+ #include <ATen/core/ATen_fwd.h>
15
+
16
+ namespace at {
17
+
18
+ namespace cpu {
19
+
20
+ TORCH_API at::Tensor & conj_physical_out(at::Tensor & out, const at::Tensor & self);
21
+ TORCH_API at::Tensor & conj_physical_outf(const at::Tensor & self, at::Tensor & out);
22
+
23
+ } // namespace cpu
24
+ } // namespace at
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_affine_grid_generator.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/cudnn_affine_grid_generator_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::cudnn_affine_grid_generator(Tensor theta, int N, int C, int H, int W) -> Tensor grid
26
+ inline at::Tensor cudnn_affine_grid_generator(const at::Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W) {
27
+ return at::_ops::cudnn_affine_grid_generator::call(theta, N, C, H, W);
28
+ }
29
+
30
+ // aten::cudnn_affine_grid_generator.out(Tensor theta, int N, int C, int H, int W, *, Tensor(a!) out) -> Tensor(a!)
31
+ inline at::Tensor & cudnn_affine_grid_generator_out(at::Tensor & out, const at::Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W) {
32
+ return at::_ops::cudnn_affine_grid_generator_out::call(theta, N, C, H, W, out);
33
+ }
34
+ // aten::cudnn_affine_grid_generator.out(Tensor theta, int N, int C, int H, int W, *, Tensor(a!) out) -> Tensor(a!)
35
+ inline at::Tensor & cudnn_affine_grid_generator_outf(const at::Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W, at::Tensor & out) {
36
+ return at::_ops::cudnn_affine_grid_generator_out::call(theta, N, C, H, W, out);
37
+ }
38
+
39
+ }
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_grid_sampler_backward.h ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Function.h
4
+
5
+ #include <ATen/Context.h>
6
+ #include <ATen/DeviceGuard.h>
7
+ #include <ATen/TensorUtils.h>
8
+ #include <ATen/TracerMode.h>
9
+ #include <ATen/core/Generator.h>
10
+ #include <ATen/core/Reduction.h>
11
+ #include <ATen/core/Tensor.h>
12
+ #include <c10/core/Scalar.h>
13
+ #include <c10/core/Storage.h>
14
+ #include <c10/core/TensorOptions.h>
15
+ #include <c10/util/Deprecated.h>
16
+ #include <c10/util/Optional.h>
17
+
18
+
19
+
20
+ #include <ATen/ops/cudnn_grid_sampler_backward_ops.h>
21
+
22
+ namespace at {
23
+
24
+
25
+ // aten::cudnn_grid_sampler_backward(Tensor self, Tensor grid, Tensor grad_output) -> (Tensor grad_self, Tensor grad_grid)
26
+ inline ::std::tuple<at::Tensor,at::Tensor> cudnn_grid_sampler_backward(const at::Tensor & self, const at::Tensor & grid, const at::Tensor & grad_output) {
27
+ return at::_ops::cudnn_grid_sampler_backward::call(self, grid, grad_output);
28
+ }
29
+
30
+ // aten::cudnn_grid_sampler_backward.out(Tensor self, Tensor grid, Tensor grad_output, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
31
+ inline ::std::tuple<at::Tensor &,at::Tensor &> cudnn_grid_sampler_backward_out(at::Tensor & out0, at::Tensor & out1, const at::Tensor & self, const at::Tensor & grid, const at::Tensor & grad_output) {
32
+ return at::_ops::cudnn_grid_sampler_backward_out::call(self, grid, grad_output, out0, out1);
33
+ }
34
+ // aten::cudnn_grid_sampler_backward.out(Tensor self, Tensor grid, Tensor grad_output, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))
35
+ inline ::std::tuple<at::Tensor &,at::Tensor &> cudnn_grid_sampler_backward_outf(const at::Tensor & self, const at::Tensor & grid, const at::Tensor & grad_output, at::Tensor & out0, at::Tensor & out1) {
36
+ return at::_ops::cudnn_grid_sampler_backward_out::call(self, grid, grad_output, out0, out1);
37
+ }
38
+
39
+ }