ZTWHHH committed
Commit 22f6814 · verified · 1 Parent(s): d65be3a

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. .gitattributes +5 -0
  2. parrot/lib/python3.10/site-packages/torch/_inductor/__pycache__/lowering.cpython-310.pyc +3 -0
  3. parrot/lib/python3.10/site-packages/torch/_logging/__init__.py +16 -0
  4. parrot/lib/python3.10/site-packages/torch/_logging/__pycache__/__init__.cpython-310.pyc +0 -0
  5. parrot/lib/python3.10/site-packages/torch/_logging/__pycache__/_internal.cpython-310.pyc +0 -0
  6. parrot/lib/python3.10/site-packages/torch/_logging/__pycache__/_registrations.cpython-310.pyc +0 -0
  7. parrot/lib/python3.10/site-packages/torch/_logging/_internal.py +1112 -0
  8. parrot/lib/python3.10/site-packages/torch/_logging/_registrations.py +154 -0
  9. parrot/lib/python3.10/site-packages/torch/_logging/structured.py +37 -0
  10. parrot/lib/python3.10/site-packages/torch/_numpy/__init__.py +30 -0
  11. parrot/lib/python3.10/site-packages/torch/_numpy/__pycache__/__init__.cpython-310.pyc +0 -0
  12. parrot/lib/python3.10/site-packages/torch/_numpy/__pycache__/_binary_ufuncs_impl.cpython-310.pyc +0 -0
  13. parrot/lib/python3.10/site-packages/torch/_numpy/__pycache__/_casting_dicts.cpython-310.pyc +0 -0
  14. parrot/lib/python3.10/site-packages/torch/_numpy/__pycache__/_dtypes.cpython-310.pyc +0 -0
  15. parrot/lib/python3.10/site-packages/torch/_numpy/__pycache__/_dtypes_impl.cpython-310.pyc +0 -0
  16. parrot/lib/python3.10/site-packages/torch/_numpy/__pycache__/_funcs.cpython-310.pyc +0 -0
  17. parrot/lib/python3.10/site-packages/torch/_numpy/__pycache__/_funcs_impl.cpython-310.pyc +0 -0
  18. parrot/lib/python3.10/site-packages/torch/_numpy/__pycache__/_getlimits.cpython-310.pyc +0 -0
  19. parrot/lib/python3.10/site-packages/torch/_numpy/__pycache__/_ndarray.cpython-310.pyc +0 -0
  20. parrot/lib/python3.10/site-packages/torch/_numpy/__pycache__/_normalizations.cpython-310.pyc +0 -0
  21. parrot/lib/python3.10/site-packages/torch/_numpy/__pycache__/_reductions_impl.cpython-310.pyc +0 -0
  22. parrot/lib/python3.10/site-packages/torch/_numpy/__pycache__/_ufuncs.cpython-310.pyc +0 -0
  23. parrot/lib/python3.10/site-packages/torch/_numpy/__pycache__/_unary_ufuncs_impl.cpython-310.pyc +0 -0
  24. parrot/lib/python3.10/site-packages/torch/_numpy/__pycache__/_util.cpython-310.pyc +0 -0
  25. parrot/lib/python3.10/site-packages/torch/_numpy/__pycache__/fft.cpython-310.pyc +0 -0
  26. parrot/lib/python3.10/site-packages/torch/_numpy/__pycache__/linalg.cpython-310.pyc +0 -0
  27. parrot/lib/python3.10/site-packages/torch/_numpy/__pycache__/random.cpython-310.pyc +0 -0
  28. parrot/lib/python3.10/site-packages/torch/_numpy/_dtypes.py +453 -0
  29. parrot/lib/python3.10/site-packages/torch/_numpy/_dtypes_impl.py +216 -0
  30. parrot/lib/python3.10/site-packages/torch/_numpy/_funcs.py +75 -0
  31. parrot/lib/python3.10/site-packages/torch/_numpy/_funcs_impl.py +2055 -0
  32. parrot/lib/python3.10/site-packages/torch/_numpy/_getlimits.py +15 -0
  33. parrot/lib/python3.10/site-packages/torch/_numpy/_ndarray.py +591 -0
  34. parrot/lib/python3.10/site-packages/torch/_numpy/_normalizations.py +258 -0
  35. parrot/lib/python3.10/site-packages/torch/_numpy/_ufuncs.py +334 -0
  36. parrot/lib/python3.10/site-packages/torch/_numpy/fft.py +130 -0
  37. parrot/lib/python3.10/site-packages/torch/_numpy/random.py +191 -0
  38. parrot/lib/python3.10/site-packages/torch/_numpy/testing/__init__.py +19 -0
  39. parrot/lib/python3.10/site-packages/torch/_numpy/testing/__pycache__/__init__.cpython-310.pyc +0 -0
  40. parrot/lib/python3.10/site-packages/torch/_numpy/testing/__pycache__/utils.cpython-310.pyc +0 -0
  41. parrot/lib/python3.10/site-packages/torch/_numpy/testing/utils.py +2390 -0
  42. parrot/lib/python3.10/site-packages/torch/_strobelight/__init__.py +0 -0
  43. parrot/lib/python3.10/site-packages/torch/_strobelight/__pycache__/__init__.cpython-310.pyc +0 -0
  44. parrot/lib/python3.10/site-packages/torch/_strobelight/__pycache__/cli_function_profiler.cpython-310.pyc +0 -0
  45. parrot/lib/python3.10/site-packages/torch/_strobelight/__pycache__/compile_time_profiler.cpython-310.pyc +0 -0
  46. parrot/lib/python3.10/site-packages/torch/_strobelight/cli_function_profiler.py +311 -0
  47. parrot/lib/python3.10/site-packages/torch/_strobelight/compile_time_profiler.py +156 -0
  48. parrot/lib/python3.10/site-packages/torch/mtia/__init__.py +263 -0
  49. parrot/lib/python3.10/site-packages/torch/mtia/__pycache__/__init__.cpython-310.pyc +0 -0
  50. parrot/lib/python3.10/site-packages/torch/mtia/__pycache__/_utils.cpython-310.pyc +0 -0
.gitattributes CHANGED
@@ -811,3 +811,8 @@ videochat2/lib/python3.10/site-packages/tensorflow/python/_pywrap_tfcompile.so f
  videochat2/lib/python3.10/site-packages/tensorflow/python/flags_pybind.so filter=lfs diff=lfs merge=lfs -text
  videochat2/lib/python3.10/site-packages/tensorflow/python/_pywrap_quantize_training.so filter=lfs diff=lfs merge=lfs -text
  videochat2/lib/python3.10/site-packages/tensorflow/python/_pywrap_toco_api.so filter=lfs diff=lfs merge=lfs -text
+ parrot/lib/python3.10/site-packages/torch/sparse/__pycache__/_triton_ops_meta.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+ parrot/lib/python3.10/site-packages/torch/_inductor/__pycache__/lowering.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+ videochat2/lib/python3.10/site-packages/tensorflow/python/_pywrap_mlir.so filter=lfs diff=lfs merge=lfs -text
+ videochat2/lib/python3.10/site-packages/tensorflow/python/_pywrap_parallel_device.so filter=lfs diff=lfs merge=lfs -text
+ pllava/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_adv.so.9 filter=lfs diff=lfs merge=lfs -text
parrot/lib/python3.10/site-packages/torch/_inductor/__pycache__/lowering.cpython-310.pyc ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:25a708eb9d8e9efae3c557b4671dab6e9784cdd9d8553c2836a5c635699b57df
+ size 149249
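
The three added lines above are the entire checked-in content of this .pyc: a Git LFS pointer recording the spec version, the SHA-256 of the actual blob, and its size in bytes. As a rough illustration (the parse_lfs_pointer helper below is hypothetical, not part of git-lfs or this repo), such a pointer can be read like this:

# Hypothetical sketch: read a Git LFS pointer file of the "key value" form above.
def parse_lfs_pointer(text: str) -> dict:
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value  # e.g. "oid" -> "sha256:25a7..."
    return fields

pointer = parse_lfs_pointer(
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:25a708eb9d8e9efae3c557b4671dab6e9784cdd9d8553c2836a5c635699b57df\n"
    "size 149249\n"
)
assert pointer["size"] == "149249"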
parrot/lib/python3.10/site-packages/torch/_logging/__init__.py ADDED
@@ -0,0 +1,16 @@
+ # Top level logging module for torch logging
+ # Design doc: https://docs.google.com/document/d/1ZRfTWKa8eaPq1AxaiHrq4ASTPouzzlPiuquSBEJYwS8/edit#
+ # Simple setup for onboarding (see above doc for more detail):
+ # 1. register any top-level log qualified name for your module in torch._logging._registrations (see there for examples)
+ # 2. register any artifacts (<artifact_name> below) in torch._logging._registrations
+ #   a. call getArtifactLogger(__name__, <artifact_name>) at your logging site instead of the standard logger to log your artifact
+ import torch._logging._registrations
+ from ._internal import (
+     _init_logs,
+     DEFAULT_LOGGING,
+     getArtifactLogger,
+     LazyString,
+     set_logs,
+     trace_structured,
+     warning_once,
+ )
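
The header comment above describes a two-step onboarding flow. A minimal sketch of that flow, assuming hypothetical "mymodule" and "my_artifact" names (the three functions are the real ones defined in _internal.py below):

# Sketch of the onboarding steps from the header comment; "mymodule" and
# "my_artifact" are hypothetical names used only for illustration.
from torch._logging._internal import getArtifactLogger, register_artifact, register_log

register_log("mymodule", "torch.mymodule")            # step 1: top-level log qname
register_artifact("my_artifact", "What this prints")  # step 2: artifact name

# step 2a: fetch the artifact logger instead of logging.getLogger(__name__)
artifact_log = getArtifactLogger("torch.mymodule.somefile", "my_artifact")
artifact_log.debug("emitted only when the my_artifact artifact is enabled")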
parrot/lib/python3.10/site-packages/torch/_logging/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (407 Bytes)
parrot/lib/python3.10/site-packages/torch/_logging/__pycache__/_internal.cpython-310.pyc ADDED
Binary file (32.3 kB)
parrot/lib/python3.10/site-packages/torch/_logging/__pycache__/_registrations.cpython-310.pyc ADDED
Binary file (4.66 kB)
parrot/lib/python3.10/site-packages/torch/_logging/_internal.py ADDED
@@ -0,0 +1,1112 @@
+ # mypy: allow-untyped-defs
+ import functools
+ import hashlib
+ import itertools
+ import json
+ import logging
+ import os
+ import os.path
+ import re
+ import tempfile
+ from dataclasses import dataclass, field
+ from importlib import __import__
+ from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
+ from weakref import WeakSet
+
+ import torch._logging.structured
+ from torch.utils._traceback import CapturedTraceback
+
+ log = logging.getLogger(__name__)
+
+ # This is a synthetic logger which doesn't correspond to an actual logger,
+ # but handles all of our "tracing" logging, which is structured and doesn't go
+ # to stderr but always goes to a dedicated log file. We don't put these
+ # loggers in the classic module hierarchy, because we don't want a suppression
+ # of logs to also cause a trace to get suppressed (traces typically are not
+ # collected, unless we are in prod, in which case they always are collected.)
+ #
+ # TODO: Maybe we should allow for some sub-hierarchy so you can control which
+ # traces you want to collect, for performance reasons.
+ #
+ # See https://docs.google.com/document/d/1CX_hJ0PNy9f3R1y8TJrfkSeLkvGjjjLU84BSXgS2AZ8/edit
+ trace_log = logging.getLogger("torch.__trace")
+
+ DEFAULT_LOG_LEVEL = logging.WARNING
+ LOG_ENV_VAR = "TORCH_LOGS"
+ LOG_OUT_ENV_VAR = "TORCH_LOGS_OUT"
+ LOG_FORMAT_ENV_VAR = "TORCH_LOGS_FORMAT"
+ TRACE_ENV_VAR = "TORCH_TRACE"
+
+
+ @dataclass
+ class LogRegistry:
+     # shorthand name to log qualified name
+     # Note: this only contains loggers registered
+     # from register_log
+     # e.g. "dynamo" -> "torch._dynamo"
+     log_alias_to_log_qnames: Dict[str, List[str]] = field(default_factory=dict)
+
+     # artifact logger qualified names,
+     # this is populated lazily, as calls to getArtifactLogger
+     # currently formatted as <module>.__<artifact_name>
+     # e.g. "torch._dynamo.convert_frame.__guards"
+     artifact_log_qnames: Set[str] = field(default_factory=set)
+
+     # child logs of registered logs if specified via open
+     # registration by the user (ie placing "torch._dynamo.output_graph" in the env var)
+     # these need to be tracked so their levels can be reset properly
+     # e.g. "torch._dynamo.output_graph"
+     child_log_qnames: Set[str] = field(default_factory=set)
+
+     # artifact names, populated by register_artifact
+     # e.g. "guards"
+     artifact_names: Set[str] = field(default_factory=set)
+
+     # Artifacts that should be visible by default in the error message
+     visible_artifacts: Set[str] = field(default_factory=set)
+
+     # A short description of each artifact
+     artifact_descriptions: Dict[str, str] = field(default_factory=dict)
+
+     # artifacts which are not displayed unless explicitly named in the
+     # settings. Ex. output_code is NOT displayed even if the inductor
+     # log level is set to DEBUG. It must be explicitly named in the settings
+     off_by_default_artifact_names: Set[str] = field(default_factory=set)
+
+     # logging format string for artifacts
+     artifact_log_formatters: Dict[str, logging.Formatter] = field(default_factory=dict)
+
+     def is_artifact(self, name):
+         return name in self.artifact_names
+
+     def is_log(self, alias):
+         return alias in self.log_alias_to_log_qnames
+
+     # register a log with an alias
+     def register_log(self, alias, log_qnames: Union[str, List[str]]):
+         if isinstance(log_qnames, str):
+             log_qnames = [log_qnames]
+         self.log_alias_to_log_qnames[alias] = log_qnames
+
+     # register an artifact name
+     def register_artifact_name(
+         self, name, description, visible, off_by_default, log_format
+     ):
+         self.artifact_names.add(name)
+         if visible:
+             self.visible_artifacts.add(name)
+         self.artifact_descriptions[name] = description
+
+         # if off by default, don't enable it
+         # when log_name's log_level is set to DEBUG
+         if off_by_default:
+             self.off_by_default_artifact_names.add(name)
+
+         if log_format is not None:
+             self.artifact_log_formatters[name] = logging.Formatter(log_format)
+
+     # register the qualified name of an artifact log
+     # this is needed to know which logs need to be reset
+     # whenever the log_state is changed
+     def register_artifact_log(self, artifact_log_qname):
+         self.artifact_log_qnames.add(artifact_log_qname)
+
+     def register_child_log(self, log_qname):
+         self.child_log_qnames.add(log_qname)
+
+     # flattens all the qnames together (TODO: consider memoizing?)
+     def get_log_qnames(self) -> Set[str]:
+         return {
+             qname
+             for qnames in self.log_alias_to_log_qnames.values()
+             for qname in qnames
+         }
+
+     def get_artifact_log_qnames(self):
+         return set(self.artifact_log_qnames)
+
+     def get_child_log_qnames(self):
+         return set(self.child_log_qnames)
+
+     def is_off_by_default(self, artifact_qname):
+         return artifact_qname in self.off_by_default_artifact_names
+
+
+ @dataclass
+ class LogState:
+     # qualified log names -> currently set log level
+     log_qname_to_level: Dict[str, str] = field(default_factory=dict)
+
+     # the set of currently enabled artifacts
+     artifact_names: Set[str] = field(default_factory=set)
+
+     def enable_artifact(self, artifact_name):
+         self.artifact_names.add(artifact_name)
+
+     def is_artifact_enabled(self, name):
+         return name in self.artifact_names
+
+     def enable_log(self, log_qnames, log_level):
+         if isinstance(log_qnames, str):
+             log_qnames = [log_qnames]
+         for log_qname in log_qnames:
+             self.log_qname_to_level[log_qname] = log_level
+
+     def get_log_level_pairs(self):
+         """Returns all qualified module names for which the user requested
+         explicit logging settings.
+
+         .. warning:
+
+             This function used to return all loggers, regardless of whether
+             or not the user specified them or not; it now only returns logs
+             which were explicitly mentioned by the user (and torch, which
+             always is implicitly requested when we initialize our logging
+             subsystem.)
+         """
+         return self.log_qname_to_level.items()
+
+     def clear(self):
+         self.log_qname_to_level.clear()
+         self.artifact_names.clear()
+
+
+ log_registry = LogRegistry()
+ log_state = LogState()
+
+ # sample usage: torch._logging.set_logs(**torch._logging.DEFAULT_LOGGING)
+ DEFAULT_LOGGING = {
+     "dynamo": logging.DEBUG,
+     "aot": logging.DEBUG,
+     "inductor": logging.DEBUG,
+     "ddp_graphs": True,
+     "graph_breaks": True,
+     "guards": True,
+     "recompiles": True,
+     "dynamic": logging.INFO,
+ }
+
+
+ def set_logs(
+     *,
+     all: Optional[int] = None,
+     dynamo: Optional[int] = None,
+     aot: Optional[int] = None,
+     autograd: Optional[int] = None,
+     dynamic: Optional[int] = None,
+     inductor: Optional[int] = None,
+     distributed: Optional[int] = None,
+     dist_c10d: Optional[int] = None,
+     dist_ddp: Optional[int] = None,
+     dist_fsdp: Optional[int] = None,
+     onnx: Optional[int] = None,
+     bytecode: bool = False,
+     aot_graphs: bool = False,
+     aot_joint_graph: bool = False,
+     ddp_graphs: bool = False,
+     graph: bool = False,
+     graph_code: bool = False,
+     graph_breaks: bool = False,
+     graph_sizes: bool = False,
+     guards: bool = False,
+     recompiles: bool = False,
+     recompiles_verbose: bool = False,
+     trace_source: bool = False,
+     trace_call: bool = False,
+     trace_bytecode: bool = False,
+     output_code: bool = False,
+     kernel_code: bool = False,
+     schedule: bool = False,
+     perf_hints: bool = False,
+     post_grad_graphs: bool = False,
+     onnx_diagnostics: bool = False,
+     fusion: bool = False,
+     overlap: bool = False,
+     export: Optional[int] = None,
+     modules: Optional[Dict[str, Union[int, bool]]] = None,
+     cudagraphs: bool = False,
+     sym_node: bool = False,
+     compiled_autograd_verbose: bool = False,
+ ):
+     """
+     Sets the log level for individual components and toggles individual log
+     artifact types.
+
+     .. warning:: This feature is a prototype and may have compatibility
+         breaking changes in the future.
+
+     .. note:: The ``TORCH_LOGS`` environment variable has complete precedence
+         over this function, so if it was set, this function does nothing.
+
+     A component is a set of related features in PyTorch. All of the log
+     messages emitted from a given component have their own log levels. If the
+     log level of a particular message has priority greater than or equal to its
+     component's log level setting, it is emitted. Otherwise, it is suppressed.
+     This allows you to, for instance, silence large groups of log messages that
+     are not relevant to you and increase verbosity of logs for components that
+     are relevant. The expected log level values, ordered from highest to lowest
+     priority, are:
+
+         * ``logging.CRITICAL``
+         * ``logging.ERROR``
+         * ``logging.WARNING``
+         * ``logging.INFO``
+         * ``logging.DEBUG``
+         * ``logging.NOTSET``
+
+     See documentation for the Python ``logging`` module for more information on
+     log levels: `<https://docs.python.org/3/library/logging.html#logging-levels>`_
+
+     An artifact is a particular type of log message. Each artifact is assigned
+     to a parent component. A component can emit many different kinds of
+     artifacts. In general, an artifact is emitted if either its corresponding
+     setting in the argument list below is turned on or if its parent component
+     is set to a log level less than or equal to the log level of the artifact.
+
+     Keyword args:
+         all (:class:`Optional[int]`):
+             The default log level for all components. Default: ``logging.WARN``
+
+         dynamo (:class:`Optional[int]`):
+             The log level for the TorchDynamo component. Default: ``logging.WARN``
+
+         aot (:class:`Optional[int]`):
+             The log level for the AOTAutograd component. Default: ``logging.WARN``
+
+         autograd (:class:`Optional[int]`):
+             The log level for autograd. Default: ``logging.WARN``
+
+         inductor (:class:`Optional[int]`):
+             The log level for the TorchInductor component. Default: ``logging.WARN``
+
+         dynamic (:class:`Optional[int]`):
+             The log level for dynamic shapes. Default: ``logging.WARN``
+
+         distributed (:class:`Optional[int]`):
+             Whether to log c10d communication operations and other debug info from PyTorch Distributed components.
+             Default: ``logging.WARN``
+
+         dist_c10d (:class:`Optional[int]`):
+             Whether to log c10d communication operations related debug info in PyTorch Distributed components.
+             Default: ``logging.WARN``
+
+         dist_ddp (:class:`Optional[int]`):
+             Whether to log debug info related to ``DistributedDataParallel``(DDP) from PyTorch Distributed components.
+             Default: ``logging.WARN``
+
+         dist_fsdp (:class:`Optional[int]`):
+             Whether to log debug info related to ``FullyShardedDataParallel``(FSDP) in PyTorch Distributed components.
+             Default: ``logging.WARN``
+
+         onnx (:class:`Optional[int]`):
+             The log level for the ONNX exporter component. Default: ``logging.WARN``
+
+         bytecode (:class:`bool`):
+             Whether to emit the original and generated bytecode from TorchDynamo.
+             Default: ``False``
+
+         aot_graphs (:class:`bool`):
+             Whether to emit the graphs generated by AOTAutograd. Default: ``False``
+
+         aot_joint_graph (:class:`bool`):
+             Whether to emit the joint forward-backward graph generated by AOTAutograd. Default: ``False``
+
+         inductor (:class:`Optional[int]`):
+             Whether to log information from inductor cudagraphs. Default: ``logging.WARN``
+
+         ddp_graphs (:class:`bool`):
+             Whether to emit graphs generated by DDPOptimizer. Default: ``False``
+
+         graph (:class:`bool`):
+             Whether to emit the graph captured by TorchDynamo in tabular format.
+             Default: ``False``
+
+         graph_code (:class:`bool`):
+             Whether to emit the python source of the graph captured by TorchDynamo.
+             Default: ``False``
+
+         graph_breaks (:class:`bool`):
+             Whether to emit the graph breaks encountered by TorchDynamo.
+             Default: ``False``
+
+         graph_sizes (:class:`bool`):
+             Whether to emit tensor sizes of the graph captured by TorchDynamo.
+             Default: ``False``
+
+         guards (:class:`bool`):
+             Whether to emit the guards generated by TorchDynamo for each compiled
+             function. Default: ``False``
+
+         recompiles (:class:`bool`):
+             Whether to emit a guard failure reason and message every time
+             TorchDynamo recompiles a function. Default: ``False``
+
+         recompiles_verbose (:class:`bool`):
+             Whether to emit all guard failure reasons when TorchDynamo recompiles
+             a function, even those that are not actually run. Default: ``False``
+
+         trace_source (:class:`bool`):
+             Whether to emit when TorchDynamo begins tracing a new line. Default: ``False``
+
+         trace_call (:class:`bool`):
+             Whether to emit detailed line location when TorchDynamo creates an FX node
+             corresponding to function call. Python 3.11+ only. Default: ``False``
+
+         trace_bytecode (:class:`bool`):
+             Whether to emit bytecode instructions and traced stack state as TorchDynamo
+             traces bytecode. Default: ``False``
+
+         output_code (:class:`bool`):
+             Whether to emit the TorchInductor output code on a per-graph basis. Default: ``False``
+
+         kernel_code (:class:`bool`):
+             Whether to emit the TorchInductor output code on a per-kernel bases. Default: ``False``
+
+         schedule (:class:`bool`):
+             Whether to emit the TorchInductor schedule. Default: ``False``
+
+         perf_hints (:class:`bool`):
+             Whether to emit the TorchInductor perf hints. Default: ``False``
+
+         post_grad_graphs (:class:`bool`):
+             Whether to emit the graphs generated by after post grad passes. Default: ``False``
+
+         onnx_diagnostics (:class:`bool`):
+             Whether to emit the ONNX exporter diagnostics in logging. Default: ``False``
+
+         fusion (:class:`bool`):
+             Whether to emit detailed Inductor fusion decisions. Default: ``False``
+
+         overlap (:class:`bool`):
+             Whether to emit detailed Inductor compute/comm overlap decisions. Default: ``False``
+
+         sym_node (:class:`bool`):
+             Whether to emit debug info for various SymNode opterations. Default: ``False``
+
+         export (:class:`Optional[int]`):
+             The log level for export. Default: ``logging.WARN``
+
+         modules (dict):
+             This argument provides an alternate way to specify the above log
+             component and artifact settings, in the format of a keyword args
+             dictionary given as a single argument. There are two cases
+             where this is useful (1) if a new log component or artifact has
+             been registered but a keyword argument for it has not been added
+             to this function and (2) if the log level for an unregistered module
+             needs to be set. This can be done by providing the fully-qualified module
+             name as the key, with the log level as the value. Default: ``None``
+
+
+     Example::
+
+         >>> # xdoctest: +SKIP
+         >>> import logging
+
+         # The following changes the "dynamo" component to emit DEBUG-level
+         # logs, and to emit "graph_code" artifacts.
+
+         >>> torch._logging.set_logs(dynamo=logging.DEBUG, graph_code=True)
+
+         # The following enables the logs for a different module
+
+         >>> torch._logging.set_logs(modules={"unregistered.module.name": logging.DEBUG})
+     """
+     # ignore if env var is set
+     if LOG_ENV_VAR in os.environ:
+         log.warning(
+             "Using TORCH_LOGS environment variable for log settings, ignoring call to set_logs"
+         )
+         return
+
+     log_state.clear()
+
+     modules = modules or {}
+
+     def _set_logs(**kwargs):
+         for alias, val in itertools.chain(kwargs.items(), modules.items()):  # type: ignore[union-attr]
+             if val is None:
+                 continue
+
+             if log_registry.is_artifact(alias):
+                 if not isinstance(val, bool):
+                     raise ValueError(
+                         f"Expected bool to enable artifact {alias}, received {val}"
+                     )
+
+                 if val:
+                     log_state.enable_artifact(alias)
+             elif log_registry.is_log(alias) or alias in log_registry.child_log_qnames:
+                 if val not in logging._levelToName:
+                     raise ValueError(
+                         f"Unrecognized log level for log {alias}: {val}, valid level values "
+                         f"are: {','.join([str(k) for k in logging._levelToName.keys()])}"
+                     )
+
+                 log_state.enable_log(
+                     log_registry.log_alias_to_log_qnames.get(alias, alias), val
+                 )
+             else:
+                 raise ValueError(
+                     f"Unrecognized log or artifact name passed to set_logs: {alias}"
+                 )
+
+         _init_logs()
+
+     _set_logs(
+         torch=all,
+         dynamo=dynamo,
+         aot=aot,
+         autograd=autograd,
+         inductor=inductor,
+         dynamic=dynamic,
+         bytecode=bytecode,
+         aot_graphs=aot_graphs,
+         aot_joint_graph=aot_joint_graph,
+         ddp_graphs=ddp_graphs,
+         distributed=distributed,
+         dist_c10d=dist_c10d,
+         dist_ddp=dist_ddp,
+         dist_fsdp=dist_fsdp,
+         graph=graph,
+         graph_code=graph_code,
+         graph_breaks=graph_breaks,
+         graph_sizes=graph_sizes,
+         guards=guards,
+         recompiles=recompiles,
+         recompiles_verbose=recompiles_verbose,
+         trace_source=trace_source,
+         trace_call=trace_call,
+         trace_bytecode=trace_bytecode,
+         output_code=output_code,
+         kernel_code=kernel_code,
+         schedule=schedule,
+         perf_hints=perf_hints,
+         post_grad_graphs=post_grad_graphs,
+         onnx=onnx,
+         onnx_diagnostics=onnx_diagnostics,
+         fusion=fusion,
+         overlap=overlap,
+         sym_node=sym_node,
+         export=export,
+         cudagraphs=cudagraphs,
+         compiled_autograd_verbose=compiled_autograd_verbose,
+     )
+
+
+ def get_loggers():
+     """
+     Returns: a list of all registered loggers
+     """
+     return [logging.getLogger(qname) for qname in log_registry.get_log_qnames()]
+
+
+ def register_log(setting_name, log_name):
+     """
+     Enables a log to be controlled by the env var and user API with the setting_name
+     Args:
+         setting_name: the shorthand name used in the env var and user API
+         log_name: the log name that the setting_name is associated with
+     """
+     log_registry.register_log(setting_name, log_name)
+
+
+ def register_artifact(
+     setting_name, description, visible=False, off_by_default=False, log_format=None
+ ):
+     """
+     Enables an artifact to be controlled by the env var and user API with name
+     Args:
+         setting_name: the shorthand name used in the env var and user API
+         description: A description of what this outputs
+         visible: Whether it gets suggested to users by default
+         off_by_default: whether this artifact should be logged when the ancestor loggers
+             are enabled at level DEBUG
+     """
+     log_registry.register_artifact_name(
+         setting_name, description, visible, off_by_default, log_format
+     )
+
+
+ def getArtifactLogger(module_qname, artifact_name):
+     if artifact_name not in log_registry.artifact_names:
+         raise ValueError(
+             f"Artifact name: {repr(artifact_name)} not registered,"
+             f"please call register_artifact({repr(artifact_name)}) in torch._logging.registrations."
+         )
+     qname = module_qname + f".__{artifact_name}"
+     log = logging.getLogger(qname)
+     log.artifact_name = artifact_name  # type: ignore[attr-defined]
+     log_registry.register_artifact_log(qname)
+     configure_artifact_log(log)
+     return log
+
+
+ INCR_VERBOSITY_CHAR = "+"
+ DECR_VERBOSITY_CHAR = "-"
+ VERBOSITY_REGEX = (
+     "("
+     + "|".join([re.escape(INCR_VERBOSITY_CHAR), re.escape(DECR_VERBOSITY_CHAR)])
+     + "?)"
+ )
+
+
+ def configure_artifact_log(log):
+     # If the artifact is off by default, then it should only be logged when explicitly
+     # enabled; set propagate to False so that this artifact is not propagated
+     # to its ancestor logger
+     if log_registry.is_off_by_default(log.artifact_name):
+         log.propagate = False
+
+     # enable artifact logging when explicitly enabled
+     if log_state.is_artifact_enabled(log.artifact_name):
+         log.setLevel(logging.DEBUG)
+         log.propagate = True
+
+
+ # match a comma separated list of loggable names (whitespace allowed after commas)
+ def _gen_settings_regex():
+     return re.compile(r"((\+|-)?[\w\.]+,\s*)*(\+|-)?[\w\.]+?")
+
+
+ def _validate_settings(settings):
+     return re.fullmatch(_gen_settings_regex(), settings) is not None
+
+
+ def help_message(verbose=False):
+     def pad_to(s, length=30):
+         assert len(s) <= length
+         return s + " " * (length - len(s))
+
+     if verbose:
+         printed_artifacts = log_registry.artifact_names
+     else:
+         printed_artifacts = log_registry.visible_artifacts
+
+     if verbose:
+         heading = "All registered names"
+     else:
+         heading = "Visible registered names (use TORCH_LOGS='+help' for full list)"
+     lines = (
+         ["all"]
+         + sorted(log_registry.log_alias_to_log_qnames.keys())
+         + sorted(
+             [
+                 f"{pad_to(name)}\t{log_registry.artifact_descriptions[name]}"
+                 for name in printed_artifacts
+             ]
+         )
+     )
+     setting_info = " " + "\n ".join(lines)
+     examples = """
+ Examples:
+   TORCH_LOGS="+dynamo,aot" will set the log level of TorchDynamo to
+   logging.DEBUG and AOT to logging.INFO
+
+   TORCH_LOGS="-dynamo,+inductor" will set the log level of TorchDynamo to
+   logging.ERROR and TorchInductor to logging.DEBUG
+
+   TORCH_LOGS="aot_graphs" will enable the aot_graphs artifact
+
+   TORCH_LOGS="+dynamo,schedule" will enable set the log level of TorchDynamo
+   to logging.DEBUG and enable the schedule artifact
+
+   TORCH_LOGS="+some.random.module,schedule" will set the log level of
+   some.random.module to logging.DEBUG and enable the schedule artifact
+
+   TORCH_LOGS_FORMAT="%(levelname)s: %(message)s" or any provided format
+   string will set the output format
+   Valid keys are "levelname", "message", "pathname", "levelno", "lineno",
+   "filename" and "name".
+
+   TORCH_LOGS_OUT=/tmp/output.txt will output the logs to /tmp/output.txt as
+   well. This is useful when the output is long.
+ """  # flake8: noqa: B950
+     msg = f"""
+ TORCH_LOGS Info
+ {examples}
+
+ {heading}
+ {setting_info}
+ """
+     return msg
+
+
+ def _invalid_settings_err_msg(settings, verbose=False):
+     valid_settings = ", ".join(
+         ["all"]
+         + list(log_registry.log_alias_to_log_qnames.keys())
+         + list(log_registry.artifact_names)
+     )
+     msg = f"""
+ Invalid log settings: {settings}, must be a comma separated list of fully
+ qualified module names, registered log names or registered artifact names.
+ For more info on various settings, try TORCH_LOGS="help"
+ Valid settings:
+ {valid_settings}
+ """
+     return msg
+
+
+ @functools.lru_cache
+ def _parse_log_settings(settings):
+     if settings == "":
+         return dict()
+
+     if settings == "help":
+         raise ValueError(help_message(verbose=False))
+     elif settings == "+help":
+         raise ValueError(help_message(verbose=True))
+     if not _validate_settings(settings):
+         raise ValueError(_invalid_settings_err_msg(settings))
+
+     settings = re.sub(r"\s+", "", settings)
+     log_names = settings.split(",")
+
+     def get_name_level_pair(name):
+         clean_name = name.replace(INCR_VERBOSITY_CHAR, "")
+         clean_name = clean_name.replace(DECR_VERBOSITY_CHAR, "")
+
+         if name[0] == INCR_VERBOSITY_CHAR:
+             level = logging.DEBUG
+         elif name[0] == DECR_VERBOSITY_CHAR:
+             level = logging.ERROR
+         else:
+             level = logging.INFO
+
+         return clean_name, level
+
+     log_state = LogState()
+
+     for name in log_names:
+         name, level = get_name_level_pair(name)
+
+         if name == "all":
+             name = "torch"
+
+         if log_registry.is_log(name):
+             assert level is not None
+             log_qnames = log_registry.log_alias_to_log_qnames[name]
+             log_state.enable_log(log_qnames, level)
+         elif log_registry.is_artifact(name):
+             log_state.enable_artifact(name)
+         elif _is_valid_module(name):
+             if not _has_registered_parent(name):
+                 log_registry.register_log(name, name)
+             else:
+                 log_registry.register_child_log(name)
+             log_state.enable_log(name, level)
+         else:
+             raise ValueError(_invalid_settings_err_msg(settings))
+
+     return log_state
+
+
+ def _is_valid_module(qname):
+     try:
+         __import__(qname)
+         return True
+     except ImportError:
+         return False
+
+
+ def _update_log_state_from_env():
+     global log_state
+     log_setting = os.environ.get(LOG_ENV_VAR, None)
+     if log_setting is not None:
+         log_state = _parse_log_settings(log_setting)
+
+
+ def _has_registered_parent(log_qname):
+     cur_log = logging.getLogger(log_qname)
+
+     registered_log_qnames = log_registry.get_log_qnames()
+
+     while cur_log.parent:
+         if cur_log.name in registered_log_qnames:
+             return True
+         cur_log = cur_log.parent
+
+     return False
+
+
+ # apply custom formats to artifacts when necessary
+ class TorchLogsFormatter(logging.Formatter):
+     def __init__(self, *, trace: bool = False):
+         super().__init__()
+         self._is_trace = trace
+
+     def format(self, record):
+         artifact_name = getattr(logging.getLogger(record.name), "artifact_name", None)
+         if artifact_name is not None:
+             artifact_formatter = log_registry.artifact_log_formatters.get(
+                 artifact_name, None
+             )
+             if artifact_formatter is not None:
+                 return artifact_formatter.format(record)
+
+         record.message = record.getMessage()
+         record.asctime = self.formatTime(record, "%m%d %H:%M:%S")
+
+         # exception handling - copied from logging.Formatter.format
+         s = record.message
+         if record.exc_info:
+             # Cache the traceback text to avoid converting it multiple times
+             # (it's constant anyway)
+             if not record.exc_text:
+                 record.exc_text = self.formatException(record.exc_info)
+             if record.exc_text:
+                 if s[-1:] != "\n":
+                     s = s + "\n"
+                 s = s + record.exc_text
+         if record.stack_info:
+             if s[-1:] != "\n":
+                 s = s + "\n"
+             s = s + self.formatStack(record.stack_info)
+
+         record.rankprefix = ""
+         if not self._is_trace and dist.is_available() and dist.is_initialized():
+             record.rankprefix = f"[rank{dist.get_rank()}]:"
+
+         record.traceid = ""
+         if (
+             not self._is_trace
+             and (trace_id := torch._guards.CompileContext.current_trace_id())
+             is not None
+         ):
+             record.traceid = f" [{trace_id}]"
+
+         glog_level_to_abbr = {
+             "DEBUG": "V",  # V is for VERBOSE in glog
+             "INFO": "I",
+             "WARNING": "W",
+             "ERROR": "E",
+             "CRITICAL": "C",
+         }
+
+         shortlevel = glog_level_to_abbr.get(record.levelname, record.levelname)
+
+         record.artifactprefix = ""
+         if artifact_name is not None:
+             record.artifactprefix = f" [__{artifact_name}]"
+
+         prefix = (
+             f"{record.rankprefix}{shortlevel}{record.asctime}.{int(record.msecs*1000):06d} {record.thread} "
+             f"{os.path.relpath(record.pathname, os.path.dirname(os.path.dirname(torch.__file__)))}:"
+             f"{record.lineno}]{record.traceid}{record.artifactprefix}"
+         )
+         if self._is_trace:
+             assert s == ""
+             try:
+                 r = f"{prefix} {json.dumps(record.metadata)}"
+             except TypeError:
+                 log.warning("failing metadata: %r", record.metadata)
+                 raise
+             if record.payload is not None:
+                 r += "".join(f"\n\t{l}" for l in record.payload.split("\n"))
+             return r
+         else:
+             lines = s.split("\n")
+             return "\n".join(f"{prefix} {l}" for l in lines)
+
+
+ def _default_formatter():
+     fmt = os.environ.get(LOG_FORMAT_ENV_VAR, None)
+     if fmt is None:
+         return TorchLogsFormatter()
+     else:
+         if fmt in ("short", "basic"):
+             fmt = logging.BASIC_FORMAT
+         return logging.Formatter(fmt)
+
+
+ DEFAULT_FORMATTER = _default_formatter()
+
+
+ def _setup_handlers(create_handler_fn, log):
+     debug_handler = _track_handler(create_handler_fn())
+     debug_handler.setFormatter(DEFAULT_FORMATTER)
+     debug_handler.setLevel(logging.DEBUG)
+     log.addHandler(debug_handler)
+
+
+ handlers = WeakSet()  # type: ignore[var-annotated]
+
+
+ # mark handlers that we've created
+ # so we don't modify user handlers
+ def _track_handler(handler):
+     handlers.add(handler)
+     return handler
+
+
+ def _is_torch_handler(handler):
+     return handler in handlers
+
+
+ # clears all torch handlers on specified loggers
+ def _clear_handlers(log):
+     to_remove = [handler for handler in log.handlers if _is_torch_handler(handler)]
+     for handler in to_remove:
+         log.removeHandler(handler)
+
+
+ def _reset_logs():
+     # reset all registered logs
+     for log_qname in log_registry.get_log_qnames():
+         log = logging.getLogger(log_qname)
+         log.setLevel(logging.WARNING)
+         log.propagate = False
+         _clear_handlers(log)
+
+     # reset all artifact and child logs
+     for artifact_log_qname in itertools.chain(
+         log_registry.get_artifact_log_qnames(), log_registry.get_child_log_qnames()
+     ):
+         log = logging.getLogger(artifact_log_qname)
+         log.setLevel(logging.NOTSET)
+         log.propagate = True
+
+     trace_log.propagate = False
+     _clear_handlers(trace_log)
+
+
+ def _get_log_state():
+     return log_state
+
+
+ def _set_log_state(state):
+     global log_state
+     log_state = state
+
+
+ def _init_logs(log_file_name=None):
+     _reset_logs()
+     _update_log_state_from_env()
+
+     out = os.environ.get(LOG_OUT_ENV_VAR, None)
+     if out is not None:
+         log_file_name = out
+
+     # First, reset all known (registered) loggers to NOTSET, so that they
+     # respect their parent log level
+     for log_qname in log_registry.get_log_qnames():
+         # But not the top level torch level: this defaults to WARNING so
+         # that our log messages don't leak to the lower levels
+         if log_qname == "torch":
+             continue
+         log = logging.getLogger(log_qname)
+         log.setLevel(logging.NOTSET)
+
+     # Now, for all loggers which the user requested to have non-standard
+     # logging behavior, modify their log levels
+     for log_qname, level in log_state.get_log_level_pairs():
+         log = logging.getLogger(log_qname)
+         log.setLevel(level)
+
+     # Finally, setup handlers for all registered loggers
+     for log_qname in log_registry.get_log_qnames():
+         log = logging.getLogger(log_qname)
+         _setup_handlers(
+             logging.StreamHandler,
+             log,
+         )
+
+         if log_file_name is not None:
+             _setup_handlers(
+                 lambda: logging.FileHandler(log_file_name),
+                 log,
+             )
+
+     # configure artifact loggers, note: this must happen last
+     # since the levels of ancestor loggers are taken into account
+     for artifact_log_qname in log_registry.get_artifact_log_qnames():
+         log = logging.getLogger(artifact_log_qname)
+         configure_artifact_log(log)
+
+     # Setup handler for the special trace_log, with different default
+     # configuration
+     trace_dir_name = os.environ.get(TRACE_ENV_VAR, None)
+     # This handler may remove itself if trace_dir_name is None and we are not
+     # actually in an FB environment. This allows us to defer actually
+     # initializing it until we actually need to log anything. This is
+     # important because JK initializes a C++ singleton, which will pork our
+     # process if we subsequently fork.
+     handler = LazyTraceHandler(trace_dir_name)
+     # This log is ALWAYS at debug level. We will additionally test if there
+     # are any handlers before deciding to actually call logging on this. Do
+     # not manually call
+     trace_log.setLevel(logging.DEBUG)
+     trace_log_handler = _track_handler(handler)
+     trace_log_handler.setFormatter(TorchLogsFormatter(trace=True))
+     trace_log.addHandler(trace_log_handler)
+
+
+ class LazyTraceHandler(logging.StreamHandler):
+     """Like FileHandler, but the file is allocated lazily only upon the first log message"""
+
+     def __init__(self, root_dir: Optional[str]):
+         # This is implemented in the same way that delay is implemented on
+         # FileHandler
+         self.root_dir = root_dir
+         logging.Handler.__init__(self)
+         self.stream = None
+         self._builtin_open = open
+
+     # cloned from FileHandler in cpython
+     def close(self):
+         self.acquire()
+         try:
+             try:
+                 if self.stream:
+                     try:
+                         self.flush()
+                     finally:
+                         stream = self.stream
+                         self.stream = None
+                         if hasattr(stream, "close"):
+                             stream.close()
+             finally:
+                 # Issue #19523: call unconditionally to
+                 # prevent a handler leak when delay is set
+                 # Also see Issue #42378: we also rely on
+                 # self._closed being set to True there
+                 logging.StreamHandler.close(self)
+         finally:
+             self.release()
+
+     def emit(self, record):
+         if self.stream is None:
+             ok = False
+             if self.root_dir is None:
+                 TRACE_LOG_DIR = "/logs"
+                 open_func = self._builtin_open
+
+                 import torch.version as torch_version
+
+                 if hasattr(torch_version, "git_version"):
+                     log.info("LazyTraceHandler: disabled because not fbcode")
+                 elif not torch._utils_internal.justknobs_check("pytorch/trace:enable"):
+                     log.info(
+                         "LazyTraceHandler: disabled because justknobs_check('pytorch/trace:enable') returned False"
+                     )
+                 elif not os.path.exists(TRACE_LOG_DIR):
+                     log.info(
+                         "LazyTraceHandler: disabled because %s does not exist",
+                         TRACE_LOG_DIR,
+                     )
+                 elif not os.access(TRACE_LOG_DIR, os.W_OK):
+                     log.info(
+                         "LazyTraceHandler: disabled because %s is not writeable",
+                         TRACE_LOG_DIR,
+                     )
+                 else:
+                     self.root_dir = TRACE_LOG_DIR
+
+             if self.root_dir is not None:
+                 os.makedirs(self.root_dir, exist_ok=True)
+                 ranksuffix = ""
+                 if dist.is_available() and dist.is_initialized():
+                     ranksuffix = f"rank_{dist.get_rank()}_"
+                 self.stream = tempfile.NamedTemporaryFile(
+                     mode="w+",
+                     suffix=".log",
+                     prefix=f"dedicated_log_torch_trace_{ranksuffix}",
+                     dir=self.root_dir,
+                     delete=False,
+                 )
+                 log.info("LazyTraceHandler: logging to %s", self.stream.name)
+             else:
+                 # We go poof, remove and no-op
+                 trace_log.removeHandler(self)
+                 return
+         if self.stream:
+             super().emit(record)
+
+
+ @functools.lru_cache(None)
+ def warning_once(logger_obj, *args, **kwargs):
+     """
+     This function is similar to `logger.warning()`, but will emit the warning with the same message only once
+     Note: The cache is for the function arguments, so 2 different callers using the same arguments will hit the cache.
+     The assumption here is that all warning messages are unique across the code. If they aren't then need to switch to
+     another type of cache that includes the caller frame information in the hashing function.
+     """
+     logger_obj.warning(*args, **kwargs)
+
+
+ class LazyString:
+     def __init__(self, func, *args, **kwargs):
+         self.func = func
+         self.args = args
+         self.kwargs = kwargs
+
+     def __str__(self):
+         return self.func(*self.args, **self.kwargs)
+
+
+ def trace_structured(
+     name: str,
+     # NB: metadata expected to be dict so adding more info is forward compatible
+     # Tuple[str, int] is a special case for string interning
+     metadata_fn: Callable[[], Union[Dict[str, Any], Tuple[str, int]]] = dict,
+     *,
+     payload_fn: Callable[[], Optional[Union[str, object]]] = lambda: None,
+     suppress_context: bool = False,
+ ):
+     """
+     metadata is an arbitrary JSON compatible struct, but it's expected to not be
+     too long (e.g., less than 1MB)
+
+     payload is an arbitrary string, which can be arbitrarily long (but expected to have
+     newlines so no lines are too long)
+     """
+     assert "name" not in ["rank", "frame_id", "frame_compile_id", "attempt"]
+     assert callable(
+         metadata_fn
+     ), f"metadata_fn should be callable, but got {type(metadata_fn)}"
+     assert callable(
+         payload_fn
+     ), f"payload_fn should be callable, but got {type(payload_fn)}"
+     # trace_log never propagates and is ALWAYS DEBUG, so also check that there
+     # are handlers instead of checking the log level
+     if trace_log.handlers:
+         record: Dict[str, object] = {}
+         record[name] = metadata_fn()
+         if not suppress_context:
+             # TODO: Actually, the rank probably should just be emitted once at
+             # the top, and not repeatedly spammed in all the logs, since it
+             # never changes and we assume no interleaving
+             if dist.is_available() and dist.is_initialized():
+                 record["rank"] = dist.get_rank()
+             if (
+                 trace_id := torch._guards.CompileContext.current_trace_id()
+             ) is not None:
+                 record["frame_id"] = trace_id.compile_id.frame_id
+                 record["frame_compile_id"] = trace_id.compile_id.frame_compile_id
+                 record["attempt"] = trace_id.attempt
+             else:
+                 # Record the stack of the log call to better diagnose why we
+                 # don't have a frame id for it
+                 record["stack"] = torch._logging.structured.from_traceback(
+                     CapturedTraceback.extract(skip=1).summary()
+                 )
+         payload = payload_fn()
+         if payload is not None:
+             if not isinstance(payload, str):
+                 if isinstance(payload, list):
+                     # special case to look better
+                     payload = "[\n" + ",\n".join(json.dumps(i) for i in payload) + "\n]"
+                 else:
+                     # force newlines so we are unlikely to overflow line limit
+                     payload = json.dumps(payload, indent=0)
+             h = hashlib.md5()
+             h.update(payload.encode("utf-8"))
+             record["has_payload"] = h.hexdigest()
+         trace_log.debug(
+             "", extra={"metadata": record, "payload": payload}, stacklevel=2
+         )
+
+
+ import torch._guards
+ import torch._utils_internal
+ import torch.distributed as dist
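
Taken together, _parse_log_settings and set_logs above define the user-facing contract: in TORCH_LOGS, a "+" prefix maps a name to logging.DEBUG, a "-" prefix to logging.ERROR, a bare name to logging.INFO, and a registered artifact name simply toggles that artifact on. A small sketch of the equivalent programmatic call (note set_logs is a no-op while TORCH_LOGS is set):

# Roughly equivalent to running with TORCH_LOGS="+dynamo,guards":
import logging

import torch

torch._logging.set_logs(dynamo=logging.DEBUG, guards=True)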
parrot/lib/python3.10/site-packages/torch/_logging/_registrations.py ADDED
@@ -0,0 +1,154 @@
+ # flake8: noqa: B950
+ from ._internal import register_artifact, register_log
+
+ DYNAMIC = [
+     "torch.fx.experimental.symbolic_shapes",
+     "torch.fx.experimental.sym_node",
+     "torch.fx.experimental.recording",
+ ]
+ DISTRIBUTED = [
+     "torch.distributed",
+     "torch._dynamo.backends.distributed",
+     "torch.nn.parallel.distributed",
+ ]
+
+ register_log("dynamo", ["torch._dynamo", *DYNAMIC])
+ register_log("aot", ["torch._functorch.aot_autograd", "torch._functorch._aot_autograd"])
+ register_log("autograd", "torch.autograd")
+ register_log("inductor", ["torch._inductor", "torch._inductor.cudagraph_trees"])
+
+ register_artifact(
+     "cudagraphs",
+     "Logs information from wrapping inductor generated code with cudagraphs.",
+ )
+
+ register_log("dynamic", DYNAMIC)
+ register_log("torch", "torch")
+ register_log("distributed", DISTRIBUTED)
+ register_log(
+     "c10d", ["torch.distributed.distributed_c10d", "torch.distributed.rendezvous"]
+ )
+ register_log(
+     "ddp", ["torch.nn.parallel.distributed", "torch._dynamo.backends.distributed"]
+ )
+ register_log("pp", ["torch.distributed.pipelining"])
+ register_log("fsdp", ["torch.distributed.fsdp"])
+ register_log("onnx", "torch.onnx")
+ register_log("export", ["torch._dynamo", "torch.export", *DYNAMIC])
+
+ register_artifact(
+     "guards",
+     "This prints the guards for every compiled Dynamo frame. It does not tell you where the guards come from.",
+     visible=True,
+ )
+ register_artifact("verbose_guards", "", off_by_default=True)
+ register_artifact(
+     "bytecode",
+     "Prints the original and modified bytecode from Dynamo. Mostly useful if you're debugging our bytecode generation in Dynamo.",
+     off_by_default=True,
+ )
+ register_artifact(
+     "graph",
+     "Prints the dynamo traced graph (prior to AOTDispatch) in a table. If you prefer python code use `graph_code` instead. ",
+ )
+ register_artifact("graph_code", "Like `graph`, but gives you the Python code instead.")
+ register_artifact(
+     "graph_sizes", "Prints the sizes of all FX nodes in the dynamo graph."
+ )
+ register_artifact(
+     "trace_source",
+     "As we execute bytecode, prints the file name / line number we are processing and the actual source code. Useful with `bytecode`",
+ )
+ register_artifact(
+     "trace_call",
+     "Like trace_source, but it will give you the per-expression blow-by-blow if your Python is recent enough.",
+ )
+ register_artifact(
+     "trace_bytecode",
+     "As we trace bytecode, prints the instruction and the current stack.",
+ )
+ register_artifact(
+     "aot_graphs",
+     "Prints the FX forward and backward graph generated by AOTDispatch, after partitioning. Useful to understand what's being given to Inductor",
+     visible=True,
+ )
+ register_artifact(
+     "aot_joint_graph",
+     "Print FX joint graph from AOTAutograd, prior to partitioning. Useful for debugging partitioning",
+ )
+ register_artifact(
+     "post_grad_graphs",
+     "Prints the FX graph generated by post grad passes. Useful to understand what's being given to Inductor after post grad passes",
+ )
+ register_artifact(
+     "compiled_autograd",
+     "Prints various logs in compiled_autograd, including but not limited to the graphs. Useful for debugging compiled_autograd.",
+     visible=True,
+ )
+ register_artifact(
+     "compiled_autograd_verbose",
+     "Will affect performance. Prints compiled_autograd logs with C++ info e.g. autograd node -> fx node mapping",
+     off_by_default=True,
+ )
+ register_artifact(
+     "ddp_graphs",
+     "Only relevant for compiling DDP. DDP splits into multiple graphs to trigger comms early. This will print each individual graph here.",
+ )
+ register_artifact(
+     "recompiles",
+     "Prints the reason why we recompiled a graph. Very, very useful.",
+     visible=True,
+ )
+ register_artifact(
+     "recompiles_verbose",
+     "Prints all guard checks that fail during a recompilation. "
+     "At runtime, Dynamo will stop at the first failed check for each failing guard. "
+     "So not all logged failing checks are actually ran by Dynamo.",
+     visible=True,
+     off_by_default=True,
+ )
+ register_artifact(
+     "graph_breaks",
+     "Prints whenever Dynamo decides that it needs to graph break (i.e. create a new graph). Useful for debugging why torch.compile has poor performance",
+     visible=True,
+ )
+ register_artifact(
+     "not_implemented",
+     "Prints log messages whenever we return NotImplemented in a multi-dispatch, letting you trace through each object we attempted to dispatch to",
+ )
+ register_artifact(
+     "output_code",
+     "Prints the code that Inductor generates (either Triton or C++)",
+     off_by_default=True,
+     visible=True,
+ )
+ register_artifact(
+     "kernel_code",
+     "Prints the code that Inductor generates (on a per-kernel basis)",
+     off_by_default=True,
+     visible=True,
+ )
+ register_artifact(
+     "schedule",
+     "Inductor scheduler information. Useful if working on Inductor fusion algo",
+     off_by_default=True,
+ )
+ register_artifact("perf_hints", "", off_by_default=True)
+ register_artifact("onnx_diagnostics", "", off_by_default=True)
+ register_artifact(
+     "fusion",
+     "Detailed Inductor fusion decisions. More detailed than 'schedule'",
+     off_by_default=True,
+ )
+ register_artifact(
+     "overlap",
+     "Detailed Inductor compute/comm overlap decisions",
+     off_by_default=True,
+ )
+ register_artifact(
+     "sym_node",
+     "Logs extra info for various SymNode operations",
+     off_by_default=True,
+ )
+
+ register_artifact("custom_format_test_artifact", "Testing only", log_format="")
parrot/lib/python3.10/site-packages/torch/_logging/structured.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Utilities for converting data types into structured JSON for dumping.
3
+ """
4
+
5
+ import traceback
6
+ from typing import Dict, Sequence
7
+
8
+ import torch._logging._internal
9
+
10
+
11
+ INTERN_TABLE: Dict[str, int] = {}
12
+
13
+
14
+ def intern_string(s: str) -> int:
15
+ r = INTERN_TABLE.get(s, None)
16
+ if r is None:
17
+ r = len(INTERN_TABLE)
18
+ INTERN_TABLE[s] = r
19
+ torch._logging._internal.trace_structured(
20
+ "str", lambda: (s, r), suppress_context=True
21
+ )
22
+ return r
23
+
24
+
25
+ def from_traceback(tb: Sequence[traceback.FrameSummary]) -> object:
26
+ r = []
27
+ for frame in tb:
28
+ # dict naming convention here coincides with
29
+ # python/combined_traceback.cpp
30
+ r.append(
31
+ {
32
+ "line": frame.lineno,
33
+ "name": frame.name,
34
+ "filename": intern_string(frame.filename),
35
+ }
36
+ )
37
+ return r
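The interning above keeps structured traces compact: each distinct filename is emitted once as a `"str"` record and afterwards referenced by its integer id. A self-contained sketch of the same pattern (torch-free, so it runs standalone):

```python
import traceback
from typing import Dict

INTERN: Dict[str, int] = {}

def intern(s: str) -> int:
    # first sighting assigns the next id; later calls reuse it
    if s not in INTERN:
        INTERN[s] = len(INTERN)
    return INTERN[s]

frames = traceback.extract_stack()
encoded = [
    {"line": f.lineno, "name": f.name, "filename": intern(f.filename)}
    for f in frames
]
print(encoded)  # repeated filenames appear as small ints, not full paths
```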
parrot/lib/python3.10/site-packages/torch/_numpy/__init__.py ADDED
@@ -0,0 +1,30 @@
1
+ # mypy: ignore-errors
2
+
3
+ from . import fft, linalg, random
4
+ from ._dtypes import * # noqa: F403
5
+ from ._funcs import * # noqa: F403
6
+ from ._getlimits import finfo, iinfo
7
+ from ._ndarray import (
8
+ array,
9
+ asarray,
10
+ ascontiguousarray,
11
+ can_cast,
12
+ from_dlpack,
13
+ ndarray,
14
+ newaxis,
15
+ result_type,
16
+ )
17
+ from ._ufuncs import * # noqa: F403
18
+ from ._util import AxisError, UFuncTypeError
19
+
20
+ # from . import testing
21
+
22
+ alltrue = all
23
+ sometrue = any
24
+
25
+ inf = float("inf")
26
+ nan = float("nan")
27
+ from math import pi, e # isort: skip
28
+
29
+ False_ = False
30
+ True_ = True
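In practice the module is used as a NumPy stand-in; a small usage sketch based on the re-exports above (e.g. `alltrue` aliasing `all`):

```python
import torch._numpy as tnp

a = tnp.arange(6).reshape(2, 3)      # ndarray wrapping a torch.Tensor
print(tnp.alltrue(a >= 0))           # alias of tnp.all, per the module above
print(tnp.pi, tnp.inf, tnp.newaxis)  # constants re-exported above
```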
parrot/lib/python3.10/site-packages/torch/_numpy/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (755 Bytes)
parrot/lib/python3.10/site-packages/torch/_numpy/__pycache__/_binary_ufuncs_impl.cpython-310.pyc ADDED
Binary file (1.77 kB)
parrot/lib/python3.10/site-packages/torch/_numpy/__pycache__/_casting_dicts.cpython-310.pyc ADDED
Binary file (11.1 kB)
parrot/lib/python3.10/site-packages/torch/_numpy/__pycache__/_dtypes.cpython-310.pyc ADDED
Binary file (10.2 kB)
parrot/lib/python3.10/site-packages/torch/_numpy/__pycache__/_dtypes_impl.cpython-310.pyc ADDED
Binary file (4.54 kB)
parrot/lib/python3.10/site-packages/torch/_numpy/__pycache__/_funcs.cpython-310.pyc ADDED
Binary file (1.65 kB)
parrot/lib/python3.10/site-packages/torch/_numpy/__pycache__/_funcs_impl.cpython-310.pyc ADDED
Binary file (42.7 kB)
parrot/lib/python3.10/site-packages/torch/_numpy/__pycache__/_getlimits.cpython-310.pyc ADDED
Binary file (498 Bytes)
parrot/lib/python3.10/site-packages/torch/_numpy/__pycache__/_ndarray.cpython-310.pyc ADDED
Binary file (16.3 kB)
parrot/lib/python3.10/site-packages/torch/_numpy/__pycache__/_normalizations.cpython-310.pyc ADDED
Binary file (6.68 kB)
parrot/lib/python3.10/site-packages/torch/_numpy/__pycache__/_reductions_impl.cpython-310.pyc ADDED
Binary file (7.97 kB)
parrot/lib/python3.10/site-packages/torch/_numpy/__pycache__/_ufuncs.cpython-310.pyc ADDED
Binary file (6.19 kB)
parrot/lib/python3.10/site-packages/torch/_numpy/__pycache__/_unary_ufuncs_impl.cpython-310.pyc ADDED
Binary file (1.5 kB)
parrot/lib/python3.10/site-packages/torch/_numpy/__pycache__/_util.cpython-310.pyc ADDED
Binary file (7.33 kB)
parrot/lib/python3.10/site-packages/torch/_numpy/__pycache__/fft.cpython-310.pyc ADDED
Binary file (3.28 kB)
parrot/lib/python3.10/site-packages/torch/_numpy/__pycache__/linalg.cpython-310.pyc ADDED
Binary file (5.58 kB)
parrot/lib/python3.10/site-packages/torch/_numpy/__pycache__/random.cpython-310.pyc ADDED
Binary file (4.35 kB)
parrot/lib/python3.10/site-packages/torch/_numpy/_dtypes.py ADDED
@@ -0,0 +1,453 @@
1
+ # mypy: ignore-errors
2
+
3
+ """ Define analogs of numpy dtypes supported by pytorch.
4
+ Define the scalar types and supported dtypes and numpy <--> torch dtype mappings.
5
+ """
6
+ import builtins
7
+
8
+ import torch
9
+
10
+ from . import _dtypes_impl
11
+
12
+
13
+ # ### Scalar types ###
14
+
15
+
16
+ class generic:
17
+ name = "generic"
18
+
19
+ def __new__(cls, value):
20
+ # NumPy scalars are modelled as 0-D arrays
21
+ # so a call to np.float32(4) produces a 0-D array.
22
+
23
+ from ._ndarray import asarray, ndarray
24
+
25
+ if isinstance(value, str) and value in ["inf", "nan"]:
26
+ value = {"inf": torch.inf, "nan": torch.nan}[value]
27
+
28
+ if isinstance(value, ndarray):
29
+ return value.astype(cls)
30
+ else:
31
+ return asarray(value, dtype=cls)
32
+
33
+
34
+ ##################
35
+ # abstract types #
36
+ ##################
37
+
38
+
39
+ class number(generic):
40
+ name = "number"
41
+
42
+
43
+ class integer(number):
44
+ name = "integer"
45
+
46
+
47
+ class inexact(number):
48
+ name = "inexact"
49
+
50
+
51
+ class signedinteger(integer):
52
+ name = "signedinteger"
53
+
54
+
55
+ class unsignedinteger(integer):
56
+ name = "unsignedinteger"
57
+
58
+
59
+ class floating(inexact):
60
+ name = "floating"
61
+
62
+
63
+ class complexfloating(inexact):
64
+ name = "complexfloating"
65
+
66
+
67
+ _abstract_dtypes = [
68
+ "generic",
69
+ "number",
70
+ "integer",
71
+ "signedinteger",
72
+ "unsignedinteger",
73
+ "inexact",
74
+ "floating",
75
+ "complexfloating",
76
+ ]
77
+
78
+ # ##### concrete types
79
+
80
+ # signed integers
81
+
82
+
83
+ class int8(signedinteger):
84
+ name = "int8"
85
+ typecode = "b"
86
+ torch_dtype = torch.int8
87
+
88
+
89
+ class int16(signedinteger):
90
+ name = "int16"
91
+ typecode = "h"
92
+ torch_dtype = torch.int16
93
+
94
+
95
+ class int32(signedinteger):
96
+ name = "int32"
97
+ typecode = "i"
98
+ torch_dtype = torch.int32
99
+
100
+
101
+ class int64(signedinteger):
102
+ name = "int64"
103
+ typecode = "l"
104
+ torch_dtype = torch.int64
105
+
106
+
107
+ # unsigned integers
108
+
109
+
110
+ class uint8(unsignedinteger):
111
+ name = "uint8"
112
+ typecode = "B"
113
+ torch_dtype = torch.uint8
114
+
115
+
116
+ class uint16(unsignedinteger):
117
+ name = "uint16"
118
+ typecode = "H"
119
+ torch_dtype = torch.uint16
120
+
121
+
122
+ class uint32(unsignedinteger):
123
+ name = "uint32"
124
+ typecode = "I"
125
+ torch_dtype = torch.uint32
126
+
127
+
128
+ class uint64(unsignedinteger):
129
+ name = "uint64"
130
+ typecode = "L"
131
+ torch_dtype = torch.uint64
132
+
133
+
134
+ # floating point
135
+
136
+
137
+ class float16(floating):
138
+ name = "float16"
139
+ typecode = "e"
140
+ torch_dtype = torch.float16
141
+
142
+
143
+ class float32(floating):
144
+ name = "float32"
145
+ typecode = "f"
146
+ torch_dtype = torch.float32
147
+
148
+
149
+ class float64(floating):
150
+ name = "float64"
151
+ typecode = "d"
152
+ torch_dtype = torch.float64
153
+
154
+
155
+ class complex64(complexfloating):
156
+ name = "complex64"
157
+ typecode = "F"
158
+ torch_dtype = torch.complex64
159
+
160
+
161
+ class complex128(complexfloating):
162
+ name = "complex128"
163
+ typecode = "D"
164
+ torch_dtype = torch.complex128
165
+
166
+
167
+ class bool_(generic):
168
+ name = "bool_"
169
+ typecode = "?"
170
+ torch_dtype = torch.bool
171
+
172
+
173
+ # name aliases
174
+ _name_aliases = {
175
+ "intp": int64,
176
+ "int_": int64,
177
+ "intc": int32,
178
+ "byte": int8,
179
+ "short": int16,
180
+ "longlong": int64, # XXX: is this correct?
181
+ "ulonglong": uint64,
182
+ "ubyte": uint8,
183
+ "half": float16,
184
+ "single": float32,
185
+ "double": float64,
186
+ "float_": float64,
187
+ "csingle": complex64,
188
+ "singlecomplex": complex64,
189
+ "cdouble": complex128,
190
+ "cfloat": complex128,
191
+ "complex_": complex128,
192
+ }
193
+ # We register float_ = float64 and so on
194
+ for name, obj in _name_aliases.items():
195
+ vars()[name] = obj
196
+
197
+
198
+ # Replicate this NumPy-defined way of grouping scalar types,
199
+ # cf tests/core/test_scalar_methods.py
200
+ sctypes = {
201
+ "int": [int8, int16, int32, int64],
202
+ "uint": [uint8, uint16, uint32, uint64],
203
+ "float": [float16, float32, float64],
204
+ "complex": [complex64, complex128],
205
+ "others": [bool_],
206
+ }
207
+
208
+
209
+ # Support mappings/functions
210
+
211
+ _names = {st.name: st for cat in sctypes for st in sctypes[cat]}
212
+ _typecodes = {st.typecode: st for cat in sctypes for st in sctypes[cat]}
213
+ _torch_dtypes = {st.torch_dtype: st for cat in sctypes for st in sctypes[cat]}
214
+
215
+
216
+ _aliases = {
217
+ "u1": uint8,
218
+ "i1": int8,
219
+ "i2": int16,
220
+ "i4": int32,
221
+ "i8": int64,
222
+ "b": int8, # XXX: srsly?
223
+ "f2": float16,
224
+ "f4": float32,
225
+ "f8": float64,
226
+ "c8": complex64,
227
+ "c16": complex128,
228
+ # numpy-specific trailing underscore
229
+ "bool_": bool_,
230
+ }
231
+
232
+
233
+ _python_types = {
234
+ int: int64,
235
+ float: float64,
236
+ complex: complex128,
237
+ builtins.bool: bool_,
238
+ # also allow stringified names of python types
239
+ int.__name__: int64,
240
+ float.__name__: float64,
241
+ complex.__name__: complex128,
242
+ builtins.bool.__name__: bool_,
243
+ }
244
+
245
+
246
+ def sctype_from_string(s):
247
+ """Normalize a string value: a type 'name' or a typecode or a width alias."""
248
+ if s in _names:
249
+ return _names[s]
250
+ if s in _name_aliases.keys():
251
+ return _name_aliases[s]
252
+ if s in _typecodes:
253
+ return _typecodes[s]
254
+ if s in _aliases:
255
+ return _aliases[s]
256
+ if s in _python_types:
257
+ return _python_types[s]
258
+ raise TypeError(f"data type {s!r} not understood")
259
+
260
+
261
+ def sctype_from_torch_dtype(torch_dtype):
262
+ return _torch_dtypes[torch_dtype]
263
+
264
+
265
+ # ### DTypes. ###
266
+
267
+
268
+ def dtype(arg):
269
+ if arg is None:
270
+ arg = _dtypes_impl.default_dtypes().float_dtype
271
+ return DType(arg)
272
+
273
+
274
+ class DType:
275
+ def __init__(self, arg):
276
+ # a pytorch object?
277
+ if isinstance(arg, torch.dtype):
278
+ sctype = _torch_dtypes[arg]
279
+ elif isinstance(arg, torch.Tensor):
280
+ sctype = _torch_dtypes[arg.dtype]
281
+ # a scalar type?
282
+ elif issubclass_(arg, generic):
283
+ sctype = arg
284
+ # a dtype already?
285
+ elif isinstance(arg, DType):
286
+ sctype = arg._scalar_type
287
+ # anything with a dtype attribute?
288
+ elif hasattr(arg, "dtype"):
289
+ sctype = arg.dtype._scalar_type
290
+ else:
291
+ sctype = sctype_from_string(arg)
292
+ self._scalar_type = sctype
293
+
294
+ @property
295
+ def name(self):
296
+ return self._scalar_type.name
297
+
298
+ @property
299
+ def type(self):
300
+ return self._scalar_type
301
+
302
+ @property
303
+ def kind(self):
304
+ # https://numpy.org/doc/stable/reference/generated/numpy.dtype.kind.html
305
+ return _torch_dtypes[self.torch_dtype].name[0]
306
+
307
+ @property
308
+ def typecode(self):
309
+ return self._scalar_type.typecode
310
+
311
+ def __eq__(self, other):
312
+ if isinstance(other, DType):
313
+ return self._scalar_type == other._scalar_type
314
+ try:
315
+ other_instance = DType(other)
316
+ except TypeError:
317
+ return False
318
+ return self._scalar_type == other_instance._scalar_type
319
+
320
+ @property
321
+ def torch_dtype(self):
322
+ return self._scalar_type.torch_dtype
323
+
324
+ def __hash__(self):
325
+ return hash(self._scalar_type.name)
326
+
327
+ def __repr__(self):
328
+ return f'dtype("{self.name}")'
329
+
330
+ __str__ = __repr__
331
+
332
+ @property
333
+ def itemsize(self):
334
+ elem = self.type(1)
335
+ return elem.tensor.element_size()
336
+
337
+ def __getstate__(self):
338
+ return self._scalar_type
339
+
340
+ def __setstate__(self, value):
341
+ self._scalar_type = value
342
+
343
+
344
+ typecodes = {
345
+ "All": "efdFDBbhil?",
346
+ "AllFloat": "efdFD",
347
+ "AllInteger": "Bbhil",
348
+ "Integer": "bhil",
349
+ "UnsignedInteger": "B",
350
+ "Float": "efd",
351
+ "Complex": "FD",
352
+ }
353
+
354
+
355
+ # ### Defaults and dtype discovery
356
+
357
+
358
+ def set_default_dtype(fp_dtype="numpy", int_dtype="numpy"):
359
+ """Set the (global) defaults for fp, complex, and int dtypes.
360
+
361
+ The complex dtype is inferred from the float (fp) dtype. It has
362
+ a width at least twice the width of the float dtype,
363
+ i.e., it's complex128 for float64 and complex64 for float32.
364
+
365
+ Parameters
366
+ ----------
367
+ fp_dtype
368
+ Allowed values are "numpy", "pytorch" or dtype_like things which
369
+ can be converted into a DType instance.
370
+ Default is "numpy" (i.e. float64).
371
+ int_dtype
372
+ Allowed values are "numpy", "pytorch" or dtype_like things which
373
+ can be converted into a DType instance.
374
+ Default is "numpy" (i.e. int64).
375
+
376
+ Returns
377
+ -------
378
+ The old default dtype state: a namedtuple with attributes ``float_dtype``,
379
+ ``complex_dtypes`` and ``int_dtype``. These attributes store *pytorch*
380
+ dtypes.
381
+
382
+ Notes
383
+ -----
384
+ This function has a side effect: it sets the global state with the provided dtypes.
385
+
386
+ The complex dtype has bit width of at least twice the width of the float
387
+ dtype, i.e. it's complex128 for float64 and complex64 for float32.
388
+
389
+ """
390
+ if fp_dtype not in ["numpy", "pytorch"]:
391
+ fp_dtype = dtype(fp_dtype).torch_dtype
392
+ if int_dtype not in ["numpy", "pytorch"]:
393
+ int_dtype = dtype(int_dtype).torch_dtype
394
+
395
+ if fp_dtype == "numpy":
396
+ float_dtype = torch.float64
397
+ elif fp_dtype == "pytorch":
398
+ float_dtype = torch.float32
399
+ else:
400
+ float_dtype = fp_dtype
401
+
402
+ complex_dtype = {
403
+ torch.float64: torch.complex128,
404
+ torch.float32: torch.complex64,
405
+ torch.float16: torch.complex64,
406
+ }[float_dtype]
407
+
408
+ if int_dtype in ["numpy", "pytorch"]:
409
+ int_dtype = torch.int64
410
+ else:
411
+ int_dtype = int_dtype
412
+
413
+ new_defaults = _dtypes_impl.DefaultDTypes(
414
+ float_dtype=float_dtype, complex_dtype=complex_dtype, int_dtype=int_dtype
415
+ )
416
+
417
+ # set the new global state and return the old state
418
+ old_defaults = _dtypes_impl.default_dtypes()
419
+ _dtypes_impl._default_dtypes = new_defaults
420
+ return old_defaults
421
+
422
+
423
+ def issubclass_(arg, klass):
424
+ try:
425
+ return issubclass(arg, klass)
426
+ except TypeError:
427
+ return False
428
+
429
+
430
+ def issubdtype(arg1, arg2):
431
+ # cf https://github.com/numpy/numpy/blob/v1.24.0/numpy/core/numerictypes.py#L356-L420
432
+
433
+ # We also accept strings even if NumPy doesn't as dtypes are serialized as their
434
+ # string representation in dynamo's graph
435
+ def str_to_abstract(t):
436
+ if isinstance(t, str) and t in _abstract_dtypes:
437
+ return globals()[t]
438
+ return t
439
+
440
+ arg1 = str_to_abstract(arg1)
441
+ arg2 = str_to_abstract(arg2)
442
+
443
+ if not issubclass_(arg1, generic):
444
+ arg1 = dtype(arg1).type
445
+ if not issubclass_(arg2, generic):
446
+ arg2 = dtype(arg2).type
447
+ return issubclass(arg1, arg2)
448
+
449
+
450
+ __all__ = ["dtype", "DType", "typecodes", "issubdtype", "set_default_dtype", "sctypes"]
451
+ __all__ += list(_names.keys()) # noqa: PLE0605
452
+ __all__ += list(_name_aliases.keys()) # noqa: PLE0605
453
+ __all__ += _abstract_dtypes # noqa: PLE0605
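To illustrate the resolution chain in `sctype_from_string` and the string-friendly `issubdtype`, a short sketch (assuming the module is importable as `torch._numpy`):

```python
import torch
import torch._numpy as tnp

dt = tnp.dtype("f4")             # width alias, resolved via _aliases
assert dt.torch_dtype is torch.float32
assert dt == "float32" and dt == tnp.float32  # DType.__eq__ coerces its argument

# Strings work for abstract categories too, since dtypes are serialized
# as strings in dynamo graphs (see the comment in issubdtype).
assert tnp.issubdtype("float32", "floating")
```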
parrot/lib/python3.10/site-packages/torch/_numpy/_dtypes_impl.py ADDED
@@ -0,0 +1,216 @@
1
+ # mypy: ignore-errors
2
+
3
+ """Dtypes/scalar type implementaions with torch dtypes.
4
+
5
+ Here `dtype` is always a torch.dtype, this module knows nothing about
6
+ scalar types, wrapper dtypes or anything like that. PyTorch only.
7
+ """
8
+ from collections import namedtuple
9
+
10
+ import torch
11
+
12
+ # defaults : mimic NumPy, allow user control
13
+ DefaultDTypes = namedtuple(
14
+ "DefaultDTypes", ["float_dtype", "complex_dtype", "int_dtype"]
15
+ )
16
+
17
+ # a global state
18
+ # We set it the first time we call default_dtypes() to avoid importing
19
+ # torch._dynamo.config and creating a circular reference
20
+ _default_dtypes = None
21
+
22
+
23
+ def default_dtypes():
24
+ global _default_dtypes
25
+ if _default_dtypes is None:
26
+ import torch._dynamo.config as config
27
+
28
+ _default_dtypes = DefaultDTypes(
29
+ float_dtype=getattr(torch, config.numpy_default_float),
30
+ complex_dtype=getattr(torch, config.numpy_default_complex),
31
+ int_dtype=getattr(torch, config.numpy_default_int),
32
+ )
33
+ assert isinstance(_default_dtypes.float_dtype, torch.dtype)
34
+ assert isinstance(_default_dtypes.complex_dtype, torch.dtype)
35
+ assert isinstance(_default_dtypes.int_dtype, torch.dtype)
36
+ return _default_dtypes
37
+
38
+
39
+ def get_default_dtype_for(dtype):
40
+ """Default scalar type given sctype category."""
41
+ if dtype == torch.bool:
42
+ return dtype
43
+ if dtype.is_complex:
44
+ return default_dtypes().complex_dtype
45
+ if dtype.is_floating_point:
46
+ return default_dtypes().float_dtype
47
+ # else, it must be (some) integer
48
+ return default_dtypes().int_dtype
49
+
50
+
51
+ from . import _casting_dicts as _cd
52
+
53
+
54
+ def can_cast_impl(from_torch_dtype, to_torch_dtype, casting):
55
+ return _cd._can_cast_dict[casting][from_torch_dtype][to_torch_dtype]
56
+
57
+
58
+ def result_type_impl(*tensors):
59
+ # NB: torch dtypes here
60
+ dtyp = tensors[0].dtype
61
+ if len(tensors) == 1:
62
+ return dtyp
63
+
64
+ for curr in tensors[1:]:
65
+ dtyp = _cd._result_type_dict[dtyp][curr.dtype]
66
+
67
+ return dtyp
68
+
69
+
70
+ def python_type_for_torch(dtyp):
71
+ """Get a python scalar type a torch dtype"""
72
+ if dtyp.is_floating_point:
73
+ typ = float
74
+ elif dtyp.is_complex:
75
+ typ = complex
76
+ elif dtyp == torch.bool:
77
+ typ = bool
78
+ else:
79
+ typ = int
80
+ return typ
81
+
82
+
83
+ # ### NEP 50 helpers ###
84
+
85
+ _SCALAR_TYPES = (int, bool, float, complex)
86
+
87
+ _SCALAR_AND_SYMBOLIC_TYPES = (
88
+ *_SCALAR_TYPES,
89
+ torch.SymInt,
90
+ torch.SymFloat,
91
+ torch.SymBool,
92
+ )
93
+
94
+ _NEP50_FUNCS_TENSOR_ONLY = (
95
+ "minimum",
96
+ "maximum",
97
+ "logaddexp",
98
+ "logaddexp2",
99
+ "lcm",
100
+ "gcd",
101
+ "hypot",
102
+ "heaviside",
103
+ "fmod",
104
+ "fmin",
105
+ "fmax",
106
+ "copysign",
107
+ "arctan2",
108
+ )
109
+
110
+
111
+ def is_scalar(x):
112
+ return isinstance(x, _SCALAR_TYPES)
113
+
114
+
115
+ def is_scalar_or_symbolic(x):
116
+ return isinstance(x, _SCALAR_AND_SYMBOLIC_TYPES)
117
+
118
+
119
+ def _dtype_for_scalar(py_type):
120
+ return {
121
+ bool: torch.bool,
122
+ torch.SymBool: torch.bool,
123
+ int: torch.int64,
124
+ torch.SymInt: torch.int64,
125
+ float: torch.float64,
126
+ torch.SymFloat: torch.float64,
127
+ complex: torch.complex128,
128
+ }[py_type]
129
+
130
+
131
+ def _dtype_for_scalar_or_tensor(x):
132
+ return x.dtype if isinstance(x, torch.Tensor) else _dtype_for_scalar(type(x))
133
+
134
+
135
+ def is_float_or_fp_tensor(x):
136
+ return _dtype_for_scalar_or_tensor(x).is_floating_point
137
+
138
+
139
+ def is_complex_or_complex_tensor(x):
140
+ return _dtype_for_scalar_or_tensor(x).is_complex
141
+
142
+
143
+ def _category(dtype):
144
+ return {
145
+ torch.bool: 0,
146
+ torch.SymBool: 0,
147
+ # int
148
+ torch.uint8: 1,
149
+ torch.int8: 1,
150
+ torch.int16: 1,
151
+ torch.int32: 1,
152
+ torch.int64: 1,
153
+ torch.SymInt: 1,
154
+ # float
155
+ torch.float16: 2,
156
+ torch.float32: 2,
157
+ torch.float64: 2,
158
+ torch.SymFloat: 2,
159
+ # complex
160
+ torch.complex64: 3,
161
+ torch.complex128: 3,
162
+ }[dtype]
163
+
164
+
165
+ def nep50_to_tensors(x1, x2, handle_weaks, function_name):
166
+ """If either of inputs is a python scalar, type-promote with NEP 50."""
167
+
168
+ def to_tensor(scalar, dtype=None):
169
+ if dtype is None:
170
+ dtype = _dtype_for_scalar(type(scalar))
171
+ dtype = get_default_dtype_for(dtype)
172
+ return torch.as_tensor(scalar, dtype=dtype)
173
+
174
+ x1_is_weak = not isinstance(x1, torch.Tensor)
175
+ x2_is_weak = not isinstance(x2, torch.Tensor)
176
+ if not handle_weaks or (x1_is_weak and x2_is_weak):
177
+ x1 = to_tensor(x1) if x1_is_weak else x1
178
+ x2 = to_tensor(x2) if x2_is_weak else x2
179
+ return x1, x2
180
+
181
+ # scalar <op> tensor: NEP 50
182
+ assert x1_is_weak != x2_is_weak
183
+
184
+ weak, not_weak = (x1, x2) if x1_is_weak else (x2, x1)
185
+
186
+ # find the dtype for the weak's type
187
+ weak_dtype = _dtype_for_scalar(type(weak))
188
+
189
+ cat_weak = _category(weak_dtype)
190
+ cat_not_weak = _category(not_weak.dtype)
191
+
192
+ dt = not_weak.dtype if cat_weak <= cat_not_weak else None
193
+
194
+ # special-case complex + float32
195
+ if weak_dtype.is_complex and not_weak.dtype == torch.float32:
196
+ dt = torch.complex64
197
+
198
+ # detect overflows: in PyTorch, uint8(-1) wraps around to 255,
199
+ # while NEP50 mandates an exception.
200
+ #
201
+ # Note that we only check if each element of the binop overflows,
202
+ # not the result. Consider, e.g. `uint8(100) + 200`. Operands are OK
203
+ # in uint8, but the result overflows and wraps around 255.
204
+ # Numpy emits a RuntimeWarning, PyTorch does not, and we do not either.
205
+ if cat_weak == 1 and cat_not_weak == 1:
206
+ # integers
207
+ iinfo = torch.iinfo(not_weak.dtype)
208
+ if not (iinfo.min <= weak <= iinfo.max):
209
+ raise OverflowError(
210
+ f"Python integer {weak} out of bounds for {not_weak.dtype}"
211
+ )
212
+ if weak_dtype != dt or function_name in _NEP50_FUNCS_TENSOR_ONLY:
213
+ # finally, make `weak` into a 0D tensor if both parameters are required to be tensors.
214
+ weak = to_tensor(weak, dt)
215
+
216
+ return (weak, not_weak) if x1_is_weak else (not_weak, weak)
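A quick demonstration of the NEP 50 behaviour implemented above, pairing a uint8 tensor with a "weak" Python scalar (module path as in this diff):

```python
import torch
from torch._numpy._dtypes_impl import nep50_to_tensors

t = torch.tensor([10, 20], dtype=torch.uint8)

# an in-range Python scalar adopts the tensor's dtype ...
x1, x2 = nep50_to_tensors(t, 5, handle_weaks=True, function_name="add")
print((x1 + x2).dtype)  # torch.uint8

# ... while an out-of-range scalar raises, as NEP 50 mandates
try:
    nep50_to_tensors(t, 300, handle_weaks=True, function_name="add")
except OverflowError as e:
    print(e)  # Python integer 300 out of bounds for torch.uint8
```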
parrot/lib/python3.10/site-packages/torch/_numpy/_funcs.py ADDED
@@ -0,0 +1,75 @@
1
+ # mypy: ignore-errors
2
+
3
+ import inspect
4
+ import itertools
5
+
6
+ from . import _funcs_impl, _reductions_impl
7
+ from ._normalizations import normalizer
8
+
9
+ # _funcs_impl.py contains functions which mimic NumPy's eponymous equivalents,
10
+ # and consume/return PyTorch tensors/dtypes.
11
+ # They are also type annotated.
12
+ # Pull these functions from _funcs_impl and decorate them with @normalizer, which
13
+ # - Converts any input `np.ndarray`, `torch._numpy.ndarray`, list of lists, Python scalars, etc into a `torch.Tensor`.
14
+ # - Maps NumPy dtypes to PyTorch dtypes
15
+ # - If the input to the `axis` kwarg is an ndarray, it maps it into a tuple
16
+ # - Implements the semantics for the `out=` arg
17
+ # - Wraps back the outputs into `torch._numpy.ndarrays`
18
+
19
+
20
+ def _public_functions(mod):
21
+ def is_public_function(f):
22
+ return inspect.isfunction(f) and not f.__name__.startswith("_")
23
+
24
+ return inspect.getmembers(mod, is_public_function)
25
+
26
+
27
+ # We fill in __all__ in the loop below
28
+ __all__ = []
29
+
30
+ # decorate implementer functions with argument normalizers and export to the top namespace
31
+ for name, func in itertools.chain(
32
+ _public_functions(_funcs_impl), _public_functions(_reductions_impl)
33
+ ):
34
+ if name in ["percentile", "quantile", "median"]:
35
+ decorated = normalizer(func, promote_scalar_result=True)
36
+ elif name == "einsum":
37
+ # normalized manually
38
+ decorated = func
39
+ else:
40
+ decorated = normalizer(func)
41
+
42
+ decorated.__qualname__ = name
43
+ decorated.__name__ = name
44
+ vars()[name] = decorated
45
+ __all__.append(name)
46
+
47
+
48
+ """
49
+ Vendored objects from numpy.lib.index_tricks
50
+ """
51
+
52
+
53
+ class IndexExpression:
54
+ """
55
+ Written by Konrad Hinsen <hinsen@cnrs-orleans.fr>
56
+ last revision: 1999-7-23
57
+
58
+ Cosmetic changes by T. Oliphant 2001
59
+ """
60
+
61
+ def __init__(self, maketuple):
62
+ self.maketuple = maketuple
63
+
64
+ def __getitem__(self, item):
65
+ if self.maketuple and not isinstance(item, tuple):
66
+ return (item,)
67
+ else:
68
+ return item
69
+
70
+
71
+ index_exp = IndexExpression(maketuple=True)
72
+ s_ = IndexExpression(maketuple=False)
73
+
74
+
75
+ __all__ += ["index_exp", "s_"]
parrot/lib/python3.10/site-packages/torch/_numpy/_funcs_impl.py ADDED
@@ -0,0 +1,2055 @@
1
+ # mypy: ignore-errors
2
+
3
+ """A thin pytorch / numpy compat layer.
4
+
5
+ Things imported from here have numpy-compatible signatures but operate on
6
+ pytorch tensors.
7
+ """
8
+ # Contents of this module end up in the main namespace via _funcs.py
9
+ # where type annotations are used in conjunction with the @normalizer decorator.
10
+ from __future__ import annotations
11
+
12
+ import builtins
13
+ import itertools
14
+ import operator
15
+ from typing import Optional, Sequence, TYPE_CHECKING
16
+
17
+ import torch
18
+
19
+ from . import _dtypes_impl, _util
20
+
21
+ if TYPE_CHECKING:
22
+ from ._normalizations import (
23
+ ArrayLike,
24
+ ArrayLikeOrScalar,
25
+ CastingModes,
26
+ DTypeLike,
27
+ NDArray,
28
+ NotImplementedType,
29
+ OutArray,
30
+ )
31
+
32
+
33
+ def copy(
34
+ a: ArrayLike, order: NotImplementedType = "K", subok: NotImplementedType = False
35
+ ):
36
+ return a.clone()
37
+
38
+
39
+ def copyto(
40
+ dst: NDArray,
41
+ src: ArrayLike,
42
+ casting: Optional[CastingModes] = "same_kind",
43
+ where: NotImplementedType = None,
44
+ ):
45
+ (src,) = _util.typecast_tensors((src,), dst.dtype, casting=casting)
46
+ dst.copy_(src)
47
+
48
+
49
+ def atleast_1d(*arys: ArrayLike):
50
+ res = torch.atleast_1d(*arys)
51
+ if isinstance(res, tuple):
52
+ return list(res)
53
+ else:
54
+ return res
55
+
56
+
57
+ def atleast_2d(*arys: ArrayLike):
58
+ res = torch.atleast_2d(*arys)
59
+ if isinstance(res, tuple):
60
+ return list(res)
61
+ else:
62
+ return res
63
+
64
+
65
+ def atleast_3d(*arys: ArrayLike):
66
+ res = torch.atleast_3d(*arys)
67
+ if isinstance(res, tuple):
68
+ return list(res)
69
+ else:
70
+ return res
71
+
72
+
73
+ def _concat_check(tup, dtype, out):
74
+ """Check inputs in concatenate et al."""
75
+ if tup == ():
76
+ raise ValueError("need at least one array to concatenate")
77
+
78
+ if out is not None and dtype is not None:
79
+ # mimic numpy
80
+ raise TypeError(
81
+ "concatenate() only takes `out` or `dtype` as an "
82
+ "argument, but both were provided."
83
+ )
84
+
85
+
86
+ def _concat_cast_helper(tensors, out=None, dtype=None, casting="same_kind"):
87
+ """Figure out dtypes, cast if necessary."""
88
+
89
+ if out is not None or dtype is not None:
90
+ # figure out the type of the inputs and outputs
91
+ out_dtype = out.dtype.torch_dtype if dtype is None else dtype
92
+ else:
93
+ out_dtype = _dtypes_impl.result_type_impl(*tensors)
94
+
95
+ # cast input arrays if necessary; do not broadcast them against `out`
96
+ tensors = _util.typecast_tensors(tensors, out_dtype, casting)
97
+
98
+ return tensors
99
+
100
+
101
+ def _concatenate(
102
+ tensors, axis=0, out=None, dtype=None, casting: Optional[CastingModes] = "same_kind"
103
+ ):
104
+ # pure torch implementation, used below and in cov/corrcoef below
105
+ tensors, axis = _util.axis_none_flatten(*tensors, axis=axis)
106
+ tensors = _concat_cast_helper(tensors, out, dtype, casting)
107
+ return torch.cat(tensors, axis)
108
+
109
+
110
+ def concatenate(
111
+ ar_tuple: Sequence[ArrayLike],
112
+ axis=0,
113
+ out: Optional[OutArray] = None,
114
+ dtype: Optional[DTypeLike] = None,
115
+ casting: Optional[CastingModes] = "same_kind",
116
+ ):
117
+ _concat_check(ar_tuple, dtype, out=out)
118
+ result = _concatenate(ar_tuple, axis=axis, out=out, dtype=dtype, casting=casting)
119
+ return result
120
+
121
+
122
+ def vstack(
123
+ tup: Sequence[ArrayLike],
124
+ *,
125
+ dtype: Optional[DTypeLike] = None,
126
+ casting: Optional[CastingModes] = "same_kind",
127
+ ):
128
+ _concat_check(tup, dtype, out=None)
129
+ tensors = _concat_cast_helper(tup, dtype=dtype, casting=casting)
130
+ return torch.vstack(tensors)
131
+
132
+
133
+ row_stack = vstack
134
+
135
+
136
+ def hstack(
137
+ tup: Sequence[ArrayLike],
138
+ *,
139
+ dtype: Optional[DTypeLike] = None,
140
+ casting: Optional[CastingModes] = "same_kind",
141
+ ):
142
+ _concat_check(tup, dtype, out=None)
143
+ tensors = _concat_cast_helper(tup, dtype=dtype, casting=casting)
144
+ return torch.hstack(tensors)
145
+
146
+
147
+ def dstack(
148
+ tup: Sequence[ArrayLike],
149
+ *,
150
+ dtype: Optional[DTypeLike] = None,
151
+ casting: Optional[CastingModes] = "same_kind",
152
+ ):
153
+ # XXX: in numpy 1.24 dstack does not have dtype and casting keywords
154
+ # but {h,v}stack do. Hence add them here for consistency.
155
+ _concat_check(tup, dtype, out=None)
156
+ tensors = _concat_cast_helper(tup, dtype=dtype, casting=casting)
157
+ return torch.dstack(tensors)
158
+
159
+
160
+ def column_stack(
161
+ tup: Sequence[ArrayLike],
162
+ *,
163
+ dtype: Optional[DTypeLike] = None,
164
+ casting: Optional[CastingModes] = "same_kind",
165
+ ):
166
+ # XXX: in numpy 1.24 column_stack does not have dtype and casting keywords
167
+ # but row_stack does. (because row_stack is an alias for vstack, really).
168
+ # Hence add these keywords here for consistency.
169
+ _concat_check(tup, dtype, out=None)
170
+ tensors = _concat_cast_helper(tup, dtype=dtype, casting=casting)
171
+ return torch.column_stack(tensors)
172
+
173
+
174
+ def stack(
175
+ arrays: Sequence[ArrayLike],
176
+ axis=0,
177
+ out: Optional[OutArray] = None,
178
+ *,
179
+ dtype: Optional[DTypeLike] = None,
180
+ casting: Optional[CastingModes] = "same_kind",
181
+ ):
182
+ _concat_check(arrays, dtype, out=out)
183
+
184
+ tensors = _concat_cast_helper(arrays, dtype=dtype, casting=casting)
185
+ result_ndim = tensors[0].ndim + 1
186
+ axis = _util.normalize_axis_index(axis, result_ndim)
187
+ return torch.stack(tensors, axis=axis)
188
+
189
+
190
+ def append(arr: ArrayLike, values: ArrayLike, axis=None):
191
+ if axis is None:
192
+ if arr.ndim != 1:
193
+ arr = arr.flatten()
194
+ values = values.flatten()
195
+ axis = arr.ndim - 1
196
+ return _concatenate((arr, values), axis=axis)
197
+
198
+
199
+ # ### split ###
200
+
201
+
202
+ def _split_helper(tensor, indices_or_sections, axis, strict=False):
203
+ if isinstance(indices_or_sections, int):
204
+ return _split_helper_int(tensor, indices_or_sections, axis, strict)
205
+ elif isinstance(indices_or_sections, (list, tuple)):
206
+ # NB: drop strict=..., it only applies to _split_helper_int
207
+ return _split_helper_list(tensor, list(indices_or_sections), axis)
208
+ else:
209
+ raise TypeError("split_helper: ", type(indices_or_sections))
210
+
211
+
212
+ def _split_helper_int(tensor, indices_or_sections, axis, strict=False):
213
+ if not isinstance(indices_or_sections, int):
214
+ raise NotImplementedError("split: indices_or_sections")
215
+
216
+ axis = _util.normalize_axis_index(axis, tensor.ndim)
217
+
218
+ # numpy: l%n chunks of size (l//n + 1), the rest are sized l//n
219
+ l, n = tensor.shape[axis], indices_or_sections
220
+
221
+ if n <= 0:
222
+ raise ValueError
223
+
224
+ if l % n == 0:
225
+ num, sz = n, l // n
226
+ lst = [sz] * num
227
+ else:
228
+ if strict:
229
+ raise ValueError("array split does not result in an equal division")
230
+
231
+ num, sz = l % n, l // n + 1
232
+ lst = [sz] * num
233
+
234
+ lst += [sz - 1] * (n - num)
235
+
236
+ return torch.split(tensor, lst, axis)
237
+
238
+
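To make the chunk-size arithmetic in `_split_helper_int` concrete, a pure-Python check of the rule in the comment above (l % n chunks of size l // n + 1, the rest of size l // n):

```python
l, n = 7, 3
if l % n == 0:
    num, sz = n, l // n
else:
    num, sz = l % n, l // n + 1
lst = [sz] * num + [sz - 1] * (n - num)
print(lst)            # [3, 2, 2], same sizes as np.array_split(np.arange(7), 3)
assert sum(lst) == l
```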
239
+ def _split_helper_list(tensor, indices_or_sections, axis):
240
+ if not isinstance(indices_or_sections, list):
241
+ raise NotImplementedError("split: indices_or_sections: list")
242
+ # numpy expects indices, while torch expects lengths of sections
243
+ # also, numpy appends zero-size arrays for indices above the shape[axis]
244
+ lst = [x for x in indices_or_sections if x <= tensor.shape[axis]]
245
+ num_extra = len(indices_or_sections) - len(lst)
246
+
247
+ lst.append(tensor.shape[axis])
248
+ lst = [
249
+ lst[0],
250
+ ] + [a - b for a, b in zip(lst[1:], lst[:-1])]
251
+ lst += [0] * num_extra
252
+
253
+ return torch.split(tensor, lst, axis)
254
+
255
+
256
+ def array_split(ary: ArrayLike, indices_or_sections, axis=0):
257
+ return _split_helper(ary, indices_or_sections, axis)
258
+
259
+
260
+ def split(ary: ArrayLike, indices_or_sections, axis=0):
261
+ return _split_helper(ary, indices_or_sections, axis, strict=True)
262
+
263
+
264
+ def hsplit(ary: ArrayLike, indices_or_sections):
265
+ if ary.ndim == 0:
266
+ raise ValueError("hsplit only works on arrays of 1 or more dimensions")
267
+ axis = 1 if ary.ndim > 1 else 0
268
+ return _split_helper(ary, indices_or_sections, axis, strict=True)
269
+
270
+
271
+ def vsplit(ary: ArrayLike, indices_or_sections):
272
+ if ary.ndim < 2:
273
+ raise ValueError("vsplit only works on arrays of 2 or more dimensions")
274
+ return _split_helper(ary, indices_or_sections, 0, strict=True)
275
+
276
+
277
+ def dsplit(ary: ArrayLike, indices_or_sections):
278
+ if ary.ndim < 3:
279
+ raise ValueError("dsplit only works on arrays of 3 or more dimensions")
280
+ return _split_helper(ary, indices_or_sections, 2, strict=True)
281
+
282
+
283
+ def kron(a: ArrayLike, b: ArrayLike):
284
+ return torch.kron(a, b)
285
+
286
+
287
+ def vander(x: ArrayLike, N=None, increasing=False):
288
+ return torch.vander(x, N, increasing)
289
+
290
+
291
+ # ### linspace, geomspace, logspace and arange ###
292
+
293
+
294
+ def linspace(
295
+ start: ArrayLike,
296
+ stop: ArrayLike,
297
+ num=50,
298
+ endpoint=True,
299
+ retstep=False,
300
+ dtype: Optional[DTypeLike] = None,
301
+ axis=0,
302
+ ):
303
+ if axis != 0 or retstep or not endpoint:
304
+ raise NotImplementedError
305
+ if dtype is None:
306
+ dtype = _dtypes_impl.default_dtypes().float_dtype
307
+ # XXX: raises TypeError if start or stop are not scalars
308
+ return torch.linspace(start, stop, num, dtype=dtype)
309
+
310
+
311
+ def geomspace(
312
+ start: ArrayLike,
313
+ stop: ArrayLike,
314
+ num=50,
315
+ endpoint=True,
316
+ dtype: Optional[DTypeLike] = None,
317
+ axis=0,
318
+ ):
319
+ if axis != 0 or not endpoint:
320
+ raise NotImplementedError
321
+ base = torch.pow(stop / start, 1.0 / (num - 1))
322
+ logbase = torch.log(base)
323
+ return torch.logspace(
324
+ torch.log(start) / logbase,
325
+ torch.log(stop) / logbase,
326
+ num,
327
+ base=base,
328
+ )
329
+
330
+
331
+ def logspace(
332
+ start,
333
+ stop,
334
+ num=50,
335
+ endpoint=True,
336
+ base=10.0,
337
+ dtype: Optional[DTypeLike] = None,
338
+ axis=0,
339
+ ):
340
+ if axis != 0 or not endpoint:
341
+ raise NotImplementedError
342
+ return torch.logspace(start, stop, num, base=base, dtype=dtype)
343
+
344
+
345
+ def arange(
346
+ start: Optional[ArrayLikeOrScalar] = None,
347
+ stop: Optional[ArrayLikeOrScalar] = None,
348
+ step: Optional[ArrayLikeOrScalar] = 1,
349
+ dtype: Optional[DTypeLike] = None,
350
+ *,
351
+ like: NotImplementedType = None,
352
+ ):
353
+ if step == 0:
354
+ raise ZeroDivisionError
355
+ if stop is None and start is None:
356
+ raise TypeError
357
+ if stop is None:
358
+ # XXX: this breaks if start is passed as a kwarg:
359
+ # arange(start=4) should raise (no stop) but doesn't
360
+ start, stop = 0, start
361
+ if start is None:
362
+ start = 0
363
+
364
+ # the dtype of the result
365
+ if dtype is None:
366
+ dtype = (
367
+ _dtypes_impl.default_dtypes().float_dtype
368
+ if any(_dtypes_impl.is_float_or_fp_tensor(x) for x in (start, stop, step))
369
+ else _dtypes_impl.default_dtypes().int_dtype
370
+ )
371
+ work_dtype = torch.float64 if dtype.is_complex else dtype
372
+
373
+ # RuntimeError: "lt_cpu" not implemented for 'ComplexFloat'. Fall back to eager.
374
+ if any(_dtypes_impl.is_complex_or_complex_tensor(x) for x in (start, stop, step)):
375
+ raise NotImplementedError
376
+
377
+ if (step > 0 and start > stop) or (step < 0 and start < stop):
378
+ # empty range
379
+ return torch.empty(0, dtype=dtype)
380
+
381
+ result = torch.arange(start, stop, step, dtype=work_dtype)
382
+ result = _util.cast_if_needed(result, dtype)
383
+ return result
384
+
385
+
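The dtype defaulting above mirrors NumPy; for instance (defaults come from `_dtypes_impl.default_dtypes()`, normally int64/float64):

```python
import torch._numpy as tnp

print(tnp.arange(3).dtype)     # dtype("int64"): all-integer inputs
print(tnp.arange(3.0).dtype)   # dtype("float64"): any float input promotes
print(tnp.arange(5, 1, -2))    # [5, 3]: negative-step handling as in NumPy
```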
386
+ # ### zeros/ones/empty/full ###
387
+
388
+
389
+ def empty(
390
+ shape,
391
+ dtype: Optional[DTypeLike] = None,
392
+ order: NotImplementedType = "C",
393
+ *,
394
+ like: NotImplementedType = None,
395
+ ):
396
+ if dtype is None:
397
+ dtype = _dtypes_impl.default_dtypes().float_dtype
398
+ return torch.empty(shape, dtype=dtype)
399
+
400
+
401
+ # NB: *_like functions deliberately deviate from numpy: it has subok=True
402
+ # as the default; we set subok=False and raise on anything else.
403
+
404
+
405
+ def empty_like(
406
+ prototype: ArrayLike,
407
+ dtype: Optional[DTypeLike] = None,
408
+ order: NotImplementedType = "K",
409
+ subok: NotImplementedType = False,
410
+ shape=None,
411
+ ):
412
+ result = torch.empty_like(prototype, dtype=dtype)
413
+ if shape is not None:
414
+ result = result.reshape(shape)
415
+ return result
416
+
417
+
418
+ def full(
419
+ shape,
420
+ fill_value: ArrayLike,
421
+ dtype: Optional[DTypeLike] = None,
422
+ order: NotImplementedType = "C",
423
+ *,
424
+ like: NotImplementedType = None,
425
+ ):
426
+ if isinstance(shape, int):
427
+ shape = (shape,)
428
+ if dtype is None:
429
+ dtype = fill_value.dtype
430
+ if not isinstance(shape, (tuple, list)):
431
+ shape = (shape,)
432
+ return torch.full(shape, fill_value, dtype=dtype)
433
+
434
+
435
+ def full_like(
436
+ a: ArrayLike,
437
+ fill_value,
438
+ dtype: Optional[DTypeLike] = None,
439
+ order: NotImplementedType = "K",
440
+ subok: NotImplementedType = False,
441
+ shape=None,
442
+ ):
443
+ # XXX: fill_value broadcasts
444
+ result = torch.full_like(a, fill_value, dtype=dtype)
445
+ if shape is not None:
446
+ result = result.reshape(shape)
447
+ return result
448
+
449
+
450
+ def ones(
451
+ shape,
452
+ dtype: Optional[DTypeLike] = None,
453
+ order: NotImplementedType = "C",
454
+ *,
455
+ like: NotImplementedType = None,
456
+ ):
457
+ if dtype is None:
458
+ dtype = _dtypes_impl.default_dtypes().float_dtype
459
+ return torch.ones(shape, dtype=dtype)
460
+
461
+
462
+ def ones_like(
463
+ a: ArrayLike,
464
+ dtype: Optional[DTypeLike] = None,
465
+ order: NotImplementedType = "K",
466
+ subok: NotImplementedType = False,
467
+ shape=None,
468
+ ):
469
+ result = torch.ones_like(a, dtype=dtype)
470
+ if shape is not None:
471
+ result = result.reshape(shape)
472
+ return result
473
+
474
+
475
+ def zeros(
476
+ shape,
477
+ dtype: Optional[DTypeLike] = None,
478
+ order: NotImplementedType = "C",
479
+ *,
480
+ like: NotImplementedType = None,
481
+ ):
482
+ if dtype is None:
483
+ dtype = _dtypes_impl.default_dtypes().float_dtype
484
+ return torch.zeros(shape, dtype=dtype)
485
+
486
+
487
+ def zeros_like(
488
+ a: ArrayLike,
489
+ dtype: Optional[DTypeLike] = None,
490
+ order: NotImplementedType = "K",
491
+ subok: NotImplementedType = False,
492
+ shape=None,
493
+ ):
494
+ result = torch.zeros_like(a, dtype=dtype)
495
+ if shape is not None:
496
+ result = result.reshape(shape)
497
+ return result
498
+
499
+
500
+ # ### cov & corrcoef ###
501
+
502
+
503
+ def _xy_helper_corrcoef(x_tensor, y_tensor=None, rowvar=True):
504
+ """Prepare inputs for cov and corrcoef."""
505
+
506
+ # https://github.com/numpy/numpy/blob/v1.24.0/numpy/lib/function_base.py#L2636
507
+ if y_tensor is not None:
508
+ # make sure x and y are at least 2D
509
+ ndim_extra = 2 - x_tensor.ndim
510
+ if ndim_extra > 0:
511
+ x_tensor = x_tensor.view((1,) * ndim_extra + x_tensor.shape)
512
+ if not rowvar and x_tensor.shape[0] != 1:
513
+ x_tensor = x_tensor.mT
514
+ x_tensor = x_tensor.clone()
515
+
516
+ ndim_extra = 2 - y_tensor.ndim
517
+ if ndim_extra > 0:
518
+ y_tensor = y_tensor.view((1,) * ndim_extra + y_tensor.shape)
519
+ if not rowvar and y_tensor.shape[0] != 1:
520
+ y_tensor = y_tensor.mT
521
+ y_tensor = y_tensor.clone()
522
+
523
+ x_tensor = _concatenate((x_tensor, y_tensor), axis=0)
524
+
525
+ return x_tensor
526
+
527
+
528
+ def corrcoef(
529
+ x: ArrayLike,
530
+ y: Optional[ArrayLike] = None,
531
+ rowvar=True,
532
+ bias=None,
533
+ ddof=None,
534
+ *,
535
+ dtype: Optional[DTypeLike] = None,
536
+ ):
537
+ if bias is not None or ddof is not None:
538
+ # deprecated in NumPy
539
+ raise NotImplementedError
540
+ xy_tensor = _xy_helper_corrcoef(x, y, rowvar)
541
+
542
+ is_half = (xy_tensor.dtype == torch.float16) and xy_tensor.is_cpu
543
+ if is_half:
544
+ # work around torch's "addmm_impl_cpu_" not implemented for 'Half'
545
+ dtype = torch.float32
546
+
547
+ xy_tensor = _util.cast_if_needed(xy_tensor, dtype)
548
+ result = torch.corrcoef(xy_tensor)
549
+
550
+ if is_half:
551
+ result = result.to(torch.float16)
552
+
553
+ return result
554
+
555
+
556
+ def cov(
557
+ m: ArrayLike,
558
+ y: Optional[ArrayLike] = None,
559
+ rowvar=True,
560
+ bias=False,
561
+ ddof=None,
562
+ fweights: Optional[ArrayLike] = None,
563
+ aweights: Optional[ArrayLike] = None,
564
+ *,
565
+ dtype: Optional[DTypeLike] = None,
566
+ ):
567
+ m = _xy_helper_corrcoef(m, y, rowvar)
568
+
569
+ if ddof is None:
570
+ ddof = 1 if bias == 0 else 0
571
+
572
+ is_half = (m.dtype == torch.float16) and m.is_cpu
573
+ if is_half:
574
+ # work around torch's "addmm_impl_cpu_" not implemented for 'Half'
575
+ dtype = torch.float32
576
+
577
+ m = _util.cast_if_needed(m, dtype)
578
+ result = torch.cov(m, correction=ddof, aweights=aweights, fweights=fweights)
579
+
580
+ if is_half:
581
+ result = result.to(torch.float16)
582
+
583
+ return result
584
+
585
+
586
+ def _conv_corr_impl(a, v, mode):
587
+ dt = _dtypes_impl.result_type_impl(a, v)
588
+ a = _util.cast_if_needed(a, dt)
589
+ v = _util.cast_if_needed(v, dt)
590
+
591
+ padding = v.shape[0] - 1 if mode == "full" else mode
592
+
593
+ if padding == "same" and v.shape[0] % 2 == 0:
594
+ # UserWarning: Using padding='same' with even kernel lengths and odd
595
+ # dilation may require a zero-padded copy of the input be created
596
+ # (Triggered internally at pytorch/aten/src/ATen/native/Convolution.cpp:1010.)
597
+ raise NotImplementedError("mode='same' and even-length weights")
598
+
599
+ # NumPy only accepts 1D arrays; PyTorch requires 2D inputs and 3D weights
600
+ aa = a[None, :]
601
+ vv = v[None, None, :]
602
+
603
+ result = torch.nn.functional.conv1d(aa, vv, padding=padding)
604
+
605
+ # torch returns a 2D result, numpy returns a 1D array
606
+ return result[0, :]
607
+
608
+
609
+ def convolve(a: ArrayLike, v: ArrayLike, mode="full"):
610
+ # NumPy: if v is longer than a, the arrays are swapped before computation
611
+ if a.shape[0] < v.shape[0]:
612
+ a, v = v, a
613
+
614
+ # flip the weights since numpy does and torch does not
615
+ v = torch.flip(v, (0,))
616
+
617
+ return _conv_corr_impl(a, v, mode)
618
+
619
+
620
+ def correlate(a: ArrayLike, v: ArrayLike, mode="valid"):
621
+ v = torch.conj_physical(v)
622
+ return _conv_corr_impl(a, v, mode)
623
+
624
+
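The relationship between `convolve` and `correlate` above is the standard NumPy one: `convolve` flips the kernel before the underlying `conv1d`, while `correlate` only conjugates it. A quick sketch:

```python
import torch._numpy as tnp

a = tnp.asarray([1.0, 2.0, 3.0])
v = tnp.asarray([0.0, 1.0, 0.5])

print(tnp.convolve(a, v))           # kernel flipped, matches np.convolve
print(tnp.correlate(a, v, "full"))  # kernel conjugated, not flipped
```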
625
+ # ### logic & element selection ###
626
+
627
+
628
+ def bincount(x: ArrayLike, /, weights: Optional[ArrayLike] = None, minlength=0):
629
+ if x.numel() == 0:
630
+ # edge case allowed by numpy
631
+ x = x.new_empty(0, dtype=int)
632
+
633
+ int_dtype = _dtypes_impl.default_dtypes().int_dtype
634
+ (x,) = _util.typecast_tensors((x,), int_dtype, casting="safe")
635
+
636
+ return torch.bincount(x, weights, minlength)
637
+
638
+
639
+ def where(
640
+ condition: ArrayLike,
641
+ x: Optional[ArrayLikeOrScalar] = None,
642
+ y: Optional[ArrayLikeOrScalar] = None,
643
+ /,
644
+ ):
645
+ if (x is None) != (y is None):
646
+ raise ValueError("either both or neither of x and y should be given")
647
+
648
+ if condition.dtype != torch.bool:
649
+ condition = condition.to(torch.bool)
650
+
651
+ if x is None and y is None:
652
+ result = torch.where(condition)
653
+ else:
654
+ result = torch.where(condition, x, y)
655
+ return result
656
+
657
+
658
+ # ###### module-level queries of object properties
659
+
660
+
661
+ def ndim(a: ArrayLike):
662
+ return a.ndim
663
+
664
+
665
+ def shape(a: ArrayLike):
666
+ return tuple(a.shape)
667
+
668
+
669
+ def size(a: ArrayLike, axis=None):
670
+ if axis is None:
671
+ return a.numel()
672
+ else:
673
+ return a.shape[axis]
674
+
675
+
676
+ # ###### shape manipulations and indexing
677
+
678
+
679
+ def expand_dims(a: ArrayLike, axis):
680
+ shape = _util.expand_shape(a.shape, axis)
681
+ return a.view(shape) # never copies
682
+
683
+
684
+ def flip(m: ArrayLike, axis=None):
685
+ # XXX: semantic difference: np.flip returns a view, torch.flip copies
686
+ if axis is None:
687
+ axis = tuple(range(m.ndim))
688
+ else:
689
+ axis = _util.normalize_axis_tuple(axis, m.ndim)
690
+ return torch.flip(m, axis)
691
+
692
+
693
+ def flipud(m: ArrayLike):
694
+ return torch.flipud(m)
695
+
696
+
697
+ def fliplr(m: ArrayLike):
698
+ return torch.fliplr(m)
699
+
700
+
701
+ def rot90(m: ArrayLike, k=1, axes=(0, 1)):
702
+ axes = _util.normalize_axis_tuple(axes, m.ndim)
703
+ return torch.rot90(m, k, axes)
704
+
705
+
706
+ # ### broadcasting and indices ###
707
+
708
+
709
+ def broadcast_to(array: ArrayLike, shape, subok: NotImplementedType = False):
710
+ return torch.broadcast_to(array, size=shape)
711
+
712
+
713
+ # This is a function from tuples to tuples, so we just reuse it
714
+ from torch import broadcast_shapes
715
+
716
+
717
+ def broadcast_arrays(*args: ArrayLike, subok: NotImplementedType = False):
718
+ return torch.broadcast_tensors(*args)
719
+
720
+
721
+ def meshgrid(*xi: ArrayLike, copy=True, sparse=False, indexing="xy"):
722
+ ndim = len(xi)
723
+
724
+ if indexing not in ["xy", "ij"]:
725
+ raise ValueError("Valid values for `indexing` are 'xy' and 'ij'.")
726
+
727
+ s0 = (1,) * ndim
728
+ output = [x.reshape(s0[:i] + (-1,) + s0[i + 1 :]) for i, x in enumerate(xi)]
729
+
730
+ if indexing == "xy" and ndim > 1:
731
+ # switch first and second axis
732
+ output[0] = output[0].reshape((1, -1) + s0[2:])
733
+ output[1] = output[1].reshape((-1, 1) + s0[2:])
734
+
735
+ if not sparse:
736
+ # Return the full N-D matrix (not only the 1-D vector)
737
+ output = torch.broadcast_tensors(*output)
738
+
739
+ if copy:
740
+ output = [x.clone() for x in output]
741
+
742
+ return list(output) # match numpy, return a list
743
+
744
+
745
+ def indices(dimensions, dtype: Optional[DTypeLike] = int, sparse=False):
746
+ # https://github.com/numpy/numpy/blob/v1.24.0/numpy/core/numeric.py#L1691-L1791
747
+ dimensions = tuple(dimensions)
748
+ N = len(dimensions)
749
+ shape = (1,) * N
750
+ if sparse:
751
+ res = tuple()
752
+ else:
753
+ res = torch.empty((N,) + dimensions, dtype=dtype)
754
+ for i, dim in enumerate(dimensions):
755
+ idx = torch.arange(dim, dtype=dtype).reshape(
756
+ shape[:i] + (dim,) + shape[i + 1 :]
757
+ )
758
+ if sparse:
759
+ res = res + (idx,)
760
+ else:
761
+ res[i] = idx
762
+ return res
763
+
764
+
765
+ # ### tri*-something ###
766
+
767
+
768
+ def tril(m: ArrayLike, k=0):
769
+ return torch.tril(m, k)
770
+
771
+
772
+ def triu(m: ArrayLike, k=0):
773
+ return torch.triu(m, k)
774
+
775
+
776
+ def tril_indices(n, k=0, m=None):
777
+ if m is None:
778
+ m = n
779
+ return torch.tril_indices(n, m, offset=k)
780
+
781
+
782
+ def triu_indices(n, k=0, m=None):
783
+ if m is None:
784
+ m = n
785
+ return torch.triu_indices(n, m, offset=k)
786
+
787
+
788
+ def tril_indices_from(arr: ArrayLike, k=0):
789
+ if arr.ndim != 2:
790
+ raise ValueError("input array must be 2-d")
791
+ # Return a tensor rather than a tuple to avoid a graphbreak
792
+ return torch.tril_indices(arr.shape[0], arr.shape[1], offset=k)
793
+
794
+
795
+ def triu_indices_from(arr: ArrayLike, k=0):
796
+ if arr.ndim != 2:
797
+ raise ValueError("input array must be 2-d")
798
+ # Return a tensor rather than a tuple to avoid a graphbreak
799
+ return torch.triu_indices(arr.shape[0], arr.shape[1], offset=k)
800
+
801
+
802
+ def tri(
803
+ N,
804
+ M=None,
805
+ k=0,
806
+ dtype: Optional[DTypeLike] = None,
807
+ *,
808
+ like: NotImplementedType = None,
809
+ ):
810
+ if M is None:
811
+ M = N
812
+ tensor = torch.ones((N, M), dtype=dtype)
813
+ return torch.tril(tensor, diagonal=k)
814
+
815
+
816
+ # ### equality, equivalence, allclose ###
817
+
818
+
819
+ def isclose(a: ArrayLike, b: ArrayLike, rtol=1.0e-5, atol=1.0e-8, equal_nan=False):
820
+ dtype = _dtypes_impl.result_type_impl(a, b)
821
+ a = _util.cast_if_needed(a, dtype)
822
+ b = _util.cast_if_needed(b, dtype)
823
+ return torch.isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)
824
+
825
+
826
+ def allclose(a: ArrayLike, b: ArrayLike, rtol=1e-05, atol=1e-08, equal_nan=False):
827
+ dtype = _dtypes_impl.result_type_impl(a, b)
828
+ a = _util.cast_if_needed(a, dtype)
829
+ b = _util.cast_if_needed(b, dtype)
830
+ return torch.allclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)
831
+
832
+
833
+ def _tensor_equal(a1, a2, equal_nan=False):
834
+ # Implementation of array_equal/array_equiv.
835
+ if a1.shape != a2.shape:
836
+ return False
837
+ cond = a1 == a2
838
+ if equal_nan:
839
+ cond = cond | (torch.isnan(a1) & torch.isnan(a2))
840
+ return cond.all().item()
841
+
842
+
843
+ def array_equal(a1: ArrayLike, a2: ArrayLike, equal_nan=False):
844
+ return _tensor_equal(a1, a2, equal_nan=equal_nan)
845
+
846
+
847
+ def array_equiv(a1: ArrayLike, a2: ArrayLike):
848
+ # *almost* the same as array_equal: _equiv tries to broadcast, _equal does not
849
+ try:
850
+ a1_t, a2_t = torch.broadcast_tensors(a1, a2)
851
+ except RuntimeError:
852
+ # failed to broadcast => not equivalent
853
+ return False
854
+ return _tensor_equal(a1_t, a2_t)
855
+
856
+
857
+ def nan_to_num(
858
+ x: ArrayLike, copy: NotImplementedType = True, nan=0.0, posinf=None, neginf=None
859
+ ):
860
+ # work around RuntimeError: "nan_to_num" not implemented for 'ComplexDouble'
861
+ if x.is_complex():
862
+ re = torch.nan_to_num(x.real, nan=nan, posinf=posinf, neginf=neginf)
863
+ im = torch.nan_to_num(x.imag, nan=nan, posinf=posinf, neginf=neginf)
864
+ return re + 1j * im
865
+ else:
866
+ return torch.nan_to_num(x, nan=nan, posinf=posinf, neginf=neginf)
867
+
868
+
869
+ # ### put/take_along_axis ###
870
+
871
+
872
+ def take(
873
+ a: ArrayLike,
874
+ indices: ArrayLike,
875
+ axis=None,
876
+ out: Optional[OutArray] = None,
877
+ mode: NotImplementedType = "raise",
878
+ ):
879
+ (a,), axis = _util.axis_none_flatten(a, axis=axis)
880
+ axis = _util.normalize_axis_index(axis, a.ndim)
881
+ idx = (slice(None),) * axis + (indices, ...)
882
+ result = a[idx]
883
+ return result
884
+
885
+
886
+ def take_along_axis(arr: ArrayLike, indices: ArrayLike, axis):
887
+ (arr,), axis = _util.axis_none_flatten(arr, axis=axis)
888
+ axis = _util.normalize_axis_index(axis, arr.ndim)
889
+ return torch.take_along_dim(arr, indices, axis)
890
+
891
+
892
+ def put(
893
+ a: NDArray,
894
+ indices: ArrayLike,
895
+ values: ArrayLike,
896
+ mode: NotImplementedType = "raise",
897
+ ):
898
+ v = values.type(a.dtype)
899
+ # If indices is larger than v, expand v to at least the size of indices. Any
900
+ # unnecessary trailing elements are then trimmed.
901
+ if indices.numel() > v.numel():
902
+ ratio = (indices.numel() + v.numel() - 1) // v.numel()
903
+ v = v.unsqueeze(0).expand((ratio,) + v.shape)
904
+ # Trim unnecessary elements, regardless of whether v was expanded. Note
905
+ # np.put() trims v to match indices by default too.
906
+ if indices.numel() < v.numel():
907
+ v = v.flatten()
908
+ v = v[: indices.numel()]
909
+ a.put_(indices, v)
910
+ return None
911
+
912
+
+ def put_along_axis(arr: ArrayLike, indices: ArrayLike, values: ArrayLike, axis):
+     (arr,), axis = _util.axis_none_flatten(arr, axis=axis)
+     axis = _util.normalize_axis_index(axis, arr.ndim)
+
+     indices, values = torch.broadcast_tensors(indices, values)
+     values = _util.cast_if_needed(values, arr.dtype)
+     result = torch.scatter(arr, axis, indices, values)
+     arr.copy_(result.reshape(arr.shape))
+     return None
+
+
+ def choose(
+     a: ArrayLike,
+     choices: Sequence[ArrayLike],
+     out: Optional[OutArray] = None,
+     mode: NotImplementedType = "raise",
+ ):
+     # First, broadcast elements of `choices`
+     choices = torch.stack(torch.broadcast_tensors(*choices))
+
+     # Use an analog of `gather(choices, 0, a)` which broadcasts `choices` vs `a`:
+     # (taken from https://github.com/pytorch/pytorch/issues/9407#issuecomment-1427907939)
+     idx_list = [
+         torch.arange(dim).view((1,) * i + (dim,) + (1,) * (choices.ndim - i - 1))
+         for i, dim in enumerate(choices.shape)
+     ]
+
+     idx_list[0] = a
+     return choices[idx_list].squeeze(0)
+
+
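+ # How the gather-analog above plays out (illustrative sketch): `a` selects, per
+ # position, which of the stacked `choices` rows to read from.
+ #
+ #   >>> choices = [torch.tensor([0, 1, 2]),
+ #   ...            torch.tensor([10, 11, 12]),
+ #   ...            torch.tensor([20, 21, 22])]
+ #   >>> choose(torch.tensor([2, 0, 1]), choices)
+ #   tensor([20,  1, 12])
+
+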
+ # ### unique et al. ###
+
+
+ def unique(
+     ar: ArrayLike,
+     return_index: NotImplementedType = False,
+     return_inverse=False,
+     return_counts=False,
+     axis=None,
+     *,
+     equal_nan: NotImplementedType = True,
+ ):
+     (ar,), axis = _util.axis_none_flatten(ar, axis=axis)
+     axis = _util.normalize_axis_index(axis, ar.ndim)
+
+     result = torch.unique(
+         ar, return_inverse=return_inverse, return_counts=return_counts, dim=axis
+     )
+
+     return result
+
+
+ def nonzero(a: ArrayLike):
+     return torch.nonzero(a, as_tuple=True)
+
+
+ def argwhere(a: ArrayLike):
+     return torch.argwhere(a)
+
+
+ def flatnonzero(a: ArrayLike):
+     return torch.flatten(a).nonzero(as_tuple=True)[0]
+
+
+ def clip(
+     a: ArrayLike,
+     min: Optional[ArrayLike] = None,
+     max: Optional[ArrayLike] = None,
+     out: Optional[OutArray] = None,
+ ):
+     return torch.clamp(a, min, max)
+
+
+ def repeat(a: ArrayLike, repeats: ArrayLikeOrScalar, axis=None):
+     return torch.repeat_interleave(a, repeats, axis)
+
+
+ def tile(A: ArrayLike, reps):
+     if isinstance(reps, int):
+         reps = (reps,)
+     return torch.tile(A, reps)
+
+
+ def resize(a: ArrayLike, new_shape=None):
+     # implementation vendored from
+     # https://github.com/numpy/numpy/blob/v1.24.0/numpy/core/fromnumeric.py#L1420-L1497
+     if new_shape is None:
+         return a
+
+     if isinstance(new_shape, int):
+         new_shape = (new_shape,)
+
+     a = a.flatten()
+
+     new_size = 1
+     for dim_length in new_shape:
+         new_size *= dim_length
+         if dim_length < 0:
+             raise ValueError("all elements of `new_shape` must be non-negative")
+
+     if a.numel() == 0 or new_size == 0:
+         # First case must zero fill. The second would have repeats == 0.
+         return torch.zeros(new_shape, dtype=a.dtype)
+
+     repeats = -(-new_size // a.numel())  # ceil division
+     a = concatenate((a,) * repeats)[:new_size]
+
+     return reshape(a, new_shape)
+
+
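+ # Cycling semantics of np.resize, sketched (illustrative): the flattened input
+ # is repeated end-to-end until it fills the requested shape.
+ #
+ #   >>> resize(torch.tensor([1, 2, 3, 4]), (3, 3))
+ #   tensor([[1, 2, 3],
+ #           [4, 1, 2],
+ #           [3, 4, 1]])
+
+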
+ # ### diag et al. ###
+
+
+ def diagonal(a: ArrayLike, offset=0, axis1=0, axis2=1):
+     axis1 = _util.normalize_axis_index(axis1, a.ndim)
+     axis2 = _util.normalize_axis_index(axis2, a.ndim)
+     return torch.diagonal(a, offset, axis1, axis2)
+
+
+ def trace(
+     a: ArrayLike,
+     offset=0,
+     axis1=0,
+     axis2=1,
+     dtype: Optional[DTypeLike] = None,
+     out: Optional[OutArray] = None,
+ ):
+     result = torch.diagonal(a, offset, dim1=axis1, dim2=axis2).sum(-1, dtype=dtype)
+     return result
+
+
+ def eye(
+     N,
+     M=None,
+     k=0,
+     dtype: Optional[DTypeLike] = None,
+     order: NotImplementedType = "C",
+     *,
+     like: NotImplementedType = None,
+ ):
+     if dtype is None:
+         dtype = _dtypes_impl.default_dtypes().float_dtype
+     if M is None:
+         M = N
+     z = torch.zeros(N, M, dtype=dtype)
+     z.diagonal(k).fill_(1)
+     return z
+
+
+ def identity(n, dtype: Optional[DTypeLike] = None, *, like: NotImplementedType = None):
+     return torch.eye(n, dtype=dtype)
+
+
+ def diag(v: ArrayLike, k=0):
+     return torch.diag(v, k)
+
+
+ def diagflat(v: ArrayLike, k=0):
+     return torch.diagflat(v, k)
+
+
+ def diag_indices(n, ndim=2):
+     idx = torch.arange(n)
+     return (idx,) * ndim
+
+
+ def diag_indices_from(arr: ArrayLike):
+     if not arr.ndim >= 2:
+         raise ValueError("input array must be at least 2-d")
+     # For more than d=2, the strided formula is only valid for arrays with
+     # all dimensions equal, so we check first.
+     s = arr.shape
+     if s[1:] != s[:-1]:
+         raise ValueError("All dimensions of input must be of equal length")
+     return diag_indices(s[0], arr.ndim)
+
+
+ def fill_diagonal(a: ArrayLike, val: ArrayLike, wrap=False):
+     if a.ndim < 2:
+         raise ValueError("array must be at least 2-d")
+     if val.numel() == 0 and not wrap:
+         a.fill_diagonal_(val)
+         return a
+
+     if val.ndim == 0:
+         val = val.unsqueeze(0)
+
+     # torch.Tensor.fill_diagonal_ only accepts scalars
+     # If the size of val is too large, then val is trimmed
+     if a.ndim == 2:
+         tall = a.shape[0] > a.shape[1]
+         # wrap does nothing for wide matrices...
+         if not wrap or not tall:
+             # Never wraps
+             diag = a.diagonal()
+             diag.copy_(val[: diag.numel()])
+         else:
+             # wraps and tall... leaving one empty line between diagonals?!
+             max_, min_ = a.shape
+             idx = torch.arange(max_ - max_ // (min_ + 1))
+             mod = idx % min_
+             div = idx // min_
+             a[(div * (min_ + 1) + mod, mod)] = val[: idx.numel()]
+     else:
+         idx = diag_indices_from(a)
+         # a.shape = (n, n, ..., n)
+         a[idx] = val[: a.shape[0]]
+
+     return a
+
+
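+ # The wrap branch, sketched (illustrative): for a tall matrix the diagonal
+ # restarts below the last written row, leaving one blank row per pass, which
+ # matches np.fill_diagonal(..., wrap=True).
+ #
+ #   >>> a = torch.zeros(5, 3, dtype=torch.int64)
+ #   >>> fill_diagonal(a, torch.tensor(1), wrap=True)
+ #   tensor([[1, 0, 0],
+ #           [0, 1, 0],
+ #           [0, 0, 1],
+ #           [0, 0, 0],
+ #           [1, 0, 0]])
+
+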
+ def vdot(a: ArrayLike, b: ArrayLike, /):
+     # 1. torch only accepts 1D arrays, numpy flattens
+     # 2. torch requires matching dtype, while numpy casts (?)
+     t_a, t_b = torch.atleast_1d(a, b)
+     if t_a.ndim > 1:
+         t_a = t_a.flatten()
+     if t_b.ndim > 1:
+         t_b = t_b.flatten()
+
+     dtype = _dtypes_impl.result_type_impl(t_a, t_b)
+     is_half = dtype == torch.float16 and (t_a.is_cpu or t_b.is_cpu)
+     is_bool = dtype == torch.bool
+
+     # work around torch's "dot" not implemented for 'Half', 'Bool'
+     if is_half:
+         dtype = torch.float32
+     elif is_bool:
+         dtype = torch.uint8
+
+     t_a = _util.cast_if_needed(t_a, dtype)
+     t_b = _util.cast_if_needed(t_b, dtype)
+
+     result = torch.vdot(t_a, t_b)
+
+     if is_half:
+         result = result.to(torch.float16)
+     elif is_bool:
+         result = result.to(torch.bool)
+
+     return result
+
+
+ def tensordot(a: ArrayLike, b: ArrayLike, axes=2):
+     if isinstance(axes, (list, tuple)):
+         axes = [[ax] if isinstance(ax, int) else ax for ax in axes]
+
+     target_dtype = _dtypes_impl.result_type_impl(a, b)
+     a = _util.cast_if_needed(a, target_dtype)
+     b = _util.cast_if_needed(b, target_dtype)
+
+     return torch.tensordot(a, b, dims=axes)
+
+
+ def dot(a: ArrayLike, b: ArrayLike, out: Optional[OutArray] = None):
+     dtype = _dtypes_impl.result_type_impl(a, b)
+     is_bool = dtype == torch.bool
+     if is_bool:
+         dtype = torch.uint8
+
+     a = _util.cast_if_needed(a, dtype)
+     b = _util.cast_if_needed(b, dtype)
+
+     if a.ndim == 0 or b.ndim == 0:
+         result = a * b
+     else:
+         result = torch.matmul(a, b)
+
+     if is_bool:
+         result = result.to(torch.bool)
+
+     return result
+
+
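+ # The compute-in-uint8, cast-back-to-bool trick above makes boolean dot behave
+ # like "any of the pairwise ANDs", as in NumPy. Illustrative sketch:
+ #
+ #   >>> dot(torch.tensor([True, True]), torch.tensor([True, True]))
+ #   tensor(True)    # computed in uint8: 1*1 + 1*1 = 2, then cast to bool
+
+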
+ def inner(a: ArrayLike, b: ArrayLike, /):
+     dtype = _dtypes_impl.result_type_impl(a, b)
+     is_half = dtype == torch.float16 and (a.is_cpu or b.is_cpu)
+     is_bool = dtype == torch.bool
+
+     if is_half:
+         # work around torch's "addmm_impl_cpu_" not implemented for 'Half'
+         dtype = torch.float32
+     elif is_bool:
+         dtype = torch.uint8
+
+     a = _util.cast_if_needed(a, dtype)
+     b = _util.cast_if_needed(b, dtype)
+
+     result = torch.inner(a, b)
+
+     if is_half:
+         result = result.to(torch.float16)
+     elif is_bool:
+         result = result.to(torch.bool)
+     return result
+
+
+ def outer(a: ArrayLike, b: ArrayLike, out: Optional[OutArray] = None):
+     return torch.outer(a, b)
+
+
+ def cross(a: ArrayLike, b: ArrayLike, axisa=-1, axisb=-1, axisc=-1, axis=None):
+     # implementation vendored from
+     # https://github.com/numpy/numpy/blob/v1.24.0/numpy/core/numeric.py#L1486-L1685
+     if axis is not None:
+         axisa, axisb, axisc = (axis,) * 3
+
+     # Check axisa and axisb are within bounds
+     axisa = _util.normalize_axis_index(axisa, a.ndim)
+     axisb = _util.normalize_axis_index(axisb, b.ndim)
+
+     # Move working axis to the end of the shape
+     a = torch.moveaxis(a, axisa, -1)
+     b = torch.moveaxis(b, axisb, -1)
+     msg = "incompatible dimensions for cross product\n(dimension must be 2 or 3)"
+     if a.shape[-1] not in (2, 3) or b.shape[-1] not in (2, 3):
+         raise ValueError(msg)
+
+     # Create the output array
+     shape = broadcast_shapes(a[..., 0].shape, b[..., 0].shape)
+     if a.shape[-1] == 3 or b.shape[-1] == 3:
+         shape += (3,)
+         # Check axisc is within bounds
+         axisc = _util.normalize_axis_index(axisc, len(shape))
+     dtype = _dtypes_impl.result_type_impl(a, b)
+     cp = torch.empty(shape, dtype=dtype)
+
+     # recast arrays as dtype
+     a = _util.cast_if_needed(a, dtype)
+     b = _util.cast_if_needed(b, dtype)
+
+     # create local aliases for readability
+     a0 = a[..., 0]
+     a1 = a[..., 1]
+     if a.shape[-1] == 3:
+         a2 = a[..., 2]
+     b0 = b[..., 0]
+     b1 = b[..., 1]
+     if b.shape[-1] == 3:
+         b2 = b[..., 2]
+     if cp.ndim != 0 and cp.shape[-1] == 3:
+         cp0 = cp[..., 0]
+         cp1 = cp[..., 1]
+         cp2 = cp[..., 2]
+
+     if a.shape[-1] == 2:
+         if b.shape[-1] == 2:
+             # a0 * b1 - a1 * b0
+             cp[...] = a0 * b1 - a1 * b0
+             return cp
+         else:
+             assert b.shape[-1] == 3
+             # cp0 = a1 * b2 - 0 (a2 = 0)
+             # cp1 = 0 - a0 * b2 (a2 = 0)
+             # cp2 = a0 * b1 - a1 * b0
+             cp0[...] = a1 * b2
+             cp1[...] = -a0 * b2
+             cp2[...] = a0 * b1 - a1 * b0
+     else:
+         assert a.shape[-1] == 3
+         if b.shape[-1] == 3:
+             cp0[...] = a1 * b2 - a2 * b1
+             cp1[...] = a2 * b0 - a0 * b2
+             cp2[...] = a0 * b1 - a1 * b0
+         else:
+             assert b.shape[-1] == 2
+             cp0[...] = -a2 * b1
+             cp1[...] = a2 * b0
+             cp2[...] = a0 * b1 - a1 * b0
+
+     return torch.moveaxis(cp, -1, axisc)
+
+
+ def einsum(*operands, out=None, dtype=None, order="K", casting="safe", optimize=False):
+     # Have to manually normalize *operands and **kwargs, following the NumPy signature.
+     # We use a local import to avoid polluting the global namespace, as these names
+     # would otherwise be re-exported from funcs.py.
+     from ._ndarray import ndarray
+     from ._normalizations import (
+         maybe_copy_to,
+         normalize_array_like,
+         normalize_casting,
+         normalize_dtype,
+         wrap_tensors,
+     )
+
+     dtype = normalize_dtype(dtype)
+     casting = normalize_casting(casting)
+     if out is not None and not isinstance(out, ndarray):
+         raise TypeError("'out' must be an array")
+     if order != "K":
+         raise NotImplementedError("'order' parameter is not supported.")
+
+     # parse arrays and normalize them
+     sublist_format = not isinstance(operands[0], str)
+     if sublist_format:
+         # op, str, op, str ... [sublistout] format: normalize every other argument
+
+         # - if sublistout is not given, the length of operands is even, and we pick
+         #   odd-numbered elements, which are arrays.
+         # - if sublistout is given, the length of operands is odd, we peel off
+         #   the last one, and pick odd-numbered elements, which are arrays.
+         #   Without [:-1], we would have picked sublistout, too.
+         array_operands = operands[:-1][::2]
+     else:
+         # ("ij->", arrays) format
+         subscripts, array_operands = operands[0], operands[1:]
+
+     tensors = [normalize_array_like(op) for op in array_operands]
+     target_dtype = _dtypes_impl.result_type_impl(*tensors) if dtype is None else dtype
+
+     # work around 'bmm' not implemented for 'Half' etc
+     is_half = target_dtype == torch.float16 and all(t.is_cpu for t in tensors)
+     if is_half:
+         target_dtype = torch.float32
+
+     is_short_int = target_dtype in [torch.uint8, torch.int8, torch.int16, torch.int32]
+     if is_short_int:
+         target_dtype = torch.int64
+
+     tensors = _util.typecast_tensors(tensors, target_dtype, casting)
+
+     from torch.backends import opt_einsum
+
+     try:
+         # set the global state to handle the optimize=... argument, restore on exit
+         if opt_einsum.is_available():
+             old_strategy = torch.backends.opt_einsum.strategy
+             old_enabled = torch.backends.opt_einsum.enabled
+
+             # torch.einsum calls opt_einsum.contract_path, which runs into
+             # https://github.com/dgasmith/opt_einsum/issues/219
+             # for strategy={True, False}
+             if optimize is True:
+                 optimize = "auto"
+             elif optimize is False:
+                 torch.backends.opt_einsum.enabled = False
+
+             torch.backends.opt_einsum.strategy = optimize
+
+         if sublist_format:
+             # recombine operands
+             sublists = operands[1::2]
+             has_sublistout = len(operands) % 2 == 1
+             if has_sublistout:
+                 sublistout = operands[-1]
+             operands = list(itertools.chain.from_iterable(zip(tensors, sublists)))
+             if has_sublistout:
+                 operands.append(sublistout)
+
+             result = torch.einsum(*operands)
+         else:
+             result = torch.einsum(subscripts, *tensors)
+
+     finally:
+         if opt_einsum.is_available():
+             torch.backends.opt_einsum.strategy = old_strategy
+             torch.backends.opt_einsum.enabled = old_enabled
+
+     result = maybe_copy_to(out, result)
+     return wrap_tensors(result)
+
+
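+ # The two calling conventions handled above, sketched (illustrative): the
+ # string-subscripts form and the interleaved "sublist" form are equivalent; in
+ # the latter, arrays sit at even positions, index lists at odd ones, with an
+ # optional output sublist last.
+ #
+ #   >>> a, b = torch.rand(2, 3), torch.rand(3, 4)
+ #   >>> r1 = einsum("ij,jk->ik", a, b)
+ #   >>> r2 = einsum(a, [0, 1], b, [1, 2], [0, 2])   # same contraction
+
+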
+ # ### sort and partition ###
+
+
+ def _sort_helper(tensor, axis, kind, order):
+     if tensor.dtype.is_complex:
+         raise NotImplementedError(f"sorting {tensor.dtype} is not supported")
+     (tensor,), axis = _util.axis_none_flatten(tensor, axis=axis)
+     axis = _util.normalize_axis_index(axis, tensor.ndim)
+
+     stable = kind == "stable"
+
+     return tensor, axis, stable
+
+
+ def sort(a: ArrayLike, axis=-1, kind=None, order: NotImplementedType = None):
+     # `order` keyword arg is only relevant for structured dtypes; so not supported here.
+     a, axis, stable = _sort_helper(a, axis, kind, order)
+     result = torch.sort(a, dim=axis, stable=stable)
+     return result.values
+
+
+ def argsort(a: ArrayLike, axis=-1, kind=None, order: NotImplementedType = None):
+     a, axis, stable = _sort_helper(a, axis, kind, order)
+     return torch.argsort(a, dim=axis, stable=stable)
+
+
+ def searchsorted(
+     a: ArrayLike, v: ArrayLike, side="left", sorter: Optional[ArrayLike] = None
+ ):
+     if a.dtype.is_complex:
+         raise NotImplementedError(f"searchsorted with dtype={a.dtype}")
+
+     return torch.searchsorted(a, v, side=side, sorter=sorter)
+
+
+ # ### swap/move/roll axis ###
+
+
+ def moveaxis(a: ArrayLike, source, destination):
+     source = _util.normalize_axis_tuple(source, a.ndim, "source")
+     destination = _util.normalize_axis_tuple(destination, a.ndim, "destination")
+     return torch.moveaxis(a, source, destination)
+
+
+ def swapaxes(a: ArrayLike, axis1, axis2):
+     axis1 = _util.normalize_axis_index(axis1, a.ndim)
+     axis2 = _util.normalize_axis_index(axis2, a.ndim)
+     return torch.swapaxes(a, axis1, axis2)
+
+
+ def rollaxis(a: ArrayLike, axis, start=0):
+     # Straight vendor from:
+     # https://github.com/numpy/numpy/blob/v1.24.0/numpy/core/numeric.py#L1259
+     #
+     # Also note this function in NumPy is mostly retained for backwards compat
+     # (https://stackoverflow.com/questions/29891583/reason-why-numpy-rollaxis-is-so-confusing)
+     # so let's not touch it unless hard pressed.
+     n = a.ndim
+     axis = _util.normalize_axis_index(axis, n)
+     if start < 0:
+         start += n
+     msg = "'%s' arg requires %d <= %s < %d, but %d was passed in"
+     if not (0 <= start < n + 1):
+         raise _util.AxisError(msg % ("start", -n, "start", n + 1, start))
+     if axis < start:
+         # it's been removed
+         start -= 1
+     if axis == start:
+         # numpy returns a view, here we try returning the tensor itself
+         # return tensor[...]
+         return a
+     axes = list(range(0, n))
+     axes.remove(axis)
+     axes.insert(start, axis)
+     return a.view(axes)
+
+
+ def roll(a: ArrayLike, shift, axis=None):
+     if axis is not None:
+         axis = _util.normalize_axis_tuple(axis, a.ndim, allow_duplicate=True)
+         if not isinstance(shift, tuple):
+             shift = (shift,) * len(axis)
+     return torch.roll(a, shift, axis)
+
+
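+ # roll with axis=None matches NumPy: the array is rolled as if flattened and
+ # then reshaped back (torch.roll does this natively when dims is None).
+ # Illustrative sketch:
+ #
+ #   >>> roll(torch.tensor([1, 2, 3, 4]), 1)
+ #   tensor([4, 1, 2, 3])
+
+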
+ # ### shape manipulations ###
+
+
+ def squeeze(a: ArrayLike, axis=None):
+     if axis == ():
+         result = a
+     elif axis is None:
+         result = a.squeeze()
+     else:
+         if isinstance(axis, tuple):
+             # squeeze the largest axis indices first, so that the smaller
+             # ones remain valid after each removal
+             result = a
+             for ax in sorted(axis, reverse=True):
+                 result = result.squeeze(ax)
+         else:
+             result = a.squeeze(axis)
+     return result
+
+
+ def reshape(a: ArrayLike, newshape, order: NotImplementedType = "C"):
+     # if sh = (1, 2, 3), numpy allows both .reshape(sh) and .reshape(*sh)
+     newshape = newshape[0] if len(newshape) == 1 else newshape
+     return a.reshape(newshape)
+
+
+ # NB: cannot use torch.reshape(a, newshape) above, because of
+ # (Pdb) torch.reshape(torch.as_tensor([1]), 1)
+ # *** TypeError: reshape(): argument 'shape' (position 2) must be tuple of SymInts, not int
+
+
+ def transpose(a: ArrayLike, axes=None):
+     # numpy allows both .transpose(sh) and .transpose(*sh)
+     # also older code uses axes being a list
+     if axes in [(), None, (None,)]:
+         axes = tuple(reversed(range(a.ndim)))
+     elif len(axes) == 1:
+         axes = axes[0]
+     return a.permute(axes)
+
+
+ def ravel(a: ArrayLike, order: NotImplementedType = "C"):
+     return torch.flatten(a)
+
+
+ def diff(
+     a: ArrayLike,
+     n=1,
+     axis=-1,
+     prepend: Optional[ArrayLike] = None,
+     append: Optional[ArrayLike] = None,
+ ):
+     axis = _util.normalize_axis_index(axis, a.ndim)
+
+     if n < 0:
+         raise ValueError(f"order must be non-negative but got {n}")
+
+     if n == 0:
+         # match numpy and return the input immediately
+         return a
+
+     if prepend is not None:
+         shape = list(a.shape)
+         shape[axis] = prepend.shape[axis] if prepend.ndim > 0 else 1
+         prepend = torch.broadcast_to(prepend, shape)
+
+     if append is not None:
+         shape = list(a.shape)
+         shape[axis] = append.shape[axis] if append.ndim > 0 else 1
+         append = torch.broadcast_to(append, shape)
+
+     return torch.diff(a, n, axis=axis, prepend=prepend, append=append)
+
+
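+ # prepend/append above accept scalars (0-d tensors) and get broadcast along
+ # `axis`, so the NumPy-style "seed the first difference" idiom works. Sketch:
+ #
+ #   >>> diff(torch.tensor([1, 3, 6]), prepend=torch.tensor(0))
+ #   tensor([1, 2, 3])
+
+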
+ # ### math functions ###
+
+
+ def angle(z: ArrayLike, deg=False):
+     result = torch.angle(z)
+     if deg:
+         result = result * (180 / torch.pi)
+     return result
+
+
+ def sinc(x: ArrayLike):
+     return torch.sinc(x)
+
+
+ # NB: have to normalize *varargs manually
+ def gradient(f: ArrayLike, *varargs, axis=None, edge_order=1):
+     N = f.ndim  # number of dimensions
+
+     varargs = _util.ndarrays_to_tensors(varargs)
+
+     if axis is None:
+         axes = tuple(range(N))
+     else:
+         axes = _util.normalize_axis_tuple(axis, N)
+
+     len_axes = len(axes)
+     n = len(varargs)
+     if n == 0:
+         # no spacing argument - use 1 in all axes
+         dx = [1.0] * len_axes
+     elif n == 1 and (_dtypes_impl.is_scalar(varargs[0]) or varargs[0].ndim == 0):
+         # single scalar or 0D tensor for all axes (np.ndim(varargs[0]) == 0)
+         dx = varargs * len_axes
+     elif n == len_axes:
+         # scalar or 1d array for each axis
+         dx = list(varargs)
+         for i, distances in enumerate(dx):
+             distances = torch.as_tensor(distances)
+             if distances.ndim == 0:
+                 continue
+             elif distances.ndim != 1:
+                 raise ValueError("distances must be either scalars or 1d")
+             if len(distances) != f.shape[axes[i]]:
+                 raise ValueError(
+                     "when 1d, distances must match "
+                     "the length of the corresponding dimension"
+                 )
+             if not (distances.dtype.is_floating_point or distances.dtype.is_complex):
+                 distances = distances.double()
+
+             diffx = torch.diff(distances)
+             # if distances are constant reduce to the scalar case
+             # since it brings a consistent speedup
+             if (diffx == diffx[0]).all():
+                 diffx = diffx[0]
+             dx[i] = diffx
+     else:
+         raise TypeError("invalid number of arguments")
+
+     if edge_order > 2:
+         raise ValueError("'edge_order' greater than 2 not supported")
+
+     # use central differences on interior and one-sided differences on the
+     # endpoints. This preserves second order-accuracy over the full domain.
+
+     outvals = []
+
+     # create slice objects --- initially all are [:, :, ..., :]
+     slice1 = [slice(None)] * N
+     slice2 = [slice(None)] * N
+     slice3 = [slice(None)] * N
+     slice4 = [slice(None)] * N
+
+     otype = f.dtype
+     if _dtypes_impl.python_type_for_torch(otype) in (int, bool):
+         # Convert to floating point.
+         # First check if f is a numpy integer type; if so, convert f to float64
+         # to avoid modular arithmetic when computing the changes in f.
+         f = f.double()
+         otype = torch.float64
+
+     for axis, ax_dx in zip(axes, dx):
+         if f.shape[axis] < edge_order + 1:
+             raise ValueError(
+                 "Shape of array too small to calculate a numerical gradient, "
+                 "at least (edge_order + 1) elements are required."
+             )
+         # result allocation
+         out = torch.empty_like(f, dtype=otype)
+
+         # spacing for the current axis (NB: np.ndim(ax_dx) == 0)
+         uniform_spacing = _dtypes_impl.is_scalar(ax_dx) or ax_dx.ndim == 0
+
+         # Numerical differentiation: 2nd order interior
+         slice1[axis] = slice(1, -1)
+         slice2[axis] = slice(None, -2)
+         slice3[axis] = slice(1, -1)
+         slice4[axis] = slice(2, None)
+
+         if uniform_spacing:
+             out[tuple(slice1)] = (f[tuple(slice4)] - f[tuple(slice2)]) / (2.0 * ax_dx)
+         else:
+             dx1 = ax_dx[0:-1]
+             dx2 = ax_dx[1:]
+             a = -(dx2) / (dx1 * (dx1 + dx2))
+             b = (dx2 - dx1) / (dx1 * dx2)
+             c = dx1 / (dx2 * (dx1 + dx2))
+             # fix the shape for broadcasting
+             shape = [1] * N
+             shape[axis] = -1
+             a = a.reshape(shape)
+             b = b.reshape(shape)
+             c = c.reshape(shape)
+             # 1D equivalent -- out[1:-1] = a * f[:-2] + b * f[1:-1] + c * f[2:]
+             out[tuple(slice1)] = (
+                 a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)]
+             )
+
+         # Numerical differentiation: 1st order edges
+         if edge_order == 1:
+             slice1[axis] = 0
+             slice2[axis] = 1
+             slice3[axis] = 0
+             dx_0 = ax_dx if uniform_spacing else ax_dx[0]
+             # 1D equivalent -- out[0] = (f[1] - f[0]) / (x[1] - x[0])
+             out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / dx_0
+
+             slice1[axis] = -1
+             slice2[axis] = -1
+             slice3[axis] = -2
+             dx_n = ax_dx if uniform_spacing else ax_dx[-1]
+             # 1D equivalent -- out[-1] = (f[-1] - f[-2]) / (x[-1] - x[-2])
+             out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / dx_n
+
+         # Numerical differentiation: 2nd order edges
+         else:
+             slice1[axis] = 0
+             slice2[axis] = 0
+             slice3[axis] = 1
+             slice4[axis] = 2
+             if uniform_spacing:
+                 a = -1.5 / ax_dx
+                 b = 2.0 / ax_dx
+                 c = -0.5 / ax_dx
+             else:
+                 dx1 = ax_dx[0]
+                 dx2 = ax_dx[1]
+                 a = -(2.0 * dx1 + dx2) / (dx1 * (dx1 + dx2))
+                 b = (dx1 + dx2) / (dx1 * dx2)
+                 c = -dx1 / (dx2 * (dx1 + dx2))
+             # 1D equivalent -- out[0] = a * f[0] + b * f[1] + c * f[2]
+             out[tuple(slice1)] = (
+                 a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)]
+             )
+
+             slice1[axis] = -1
+             slice2[axis] = -3
+             slice3[axis] = -2
+             slice4[axis] = -1
+             if uniform_spacing:
+                 a = 0.5 / ax_dx
+                 b = -2.0 / ax_dx
+                 c = 1.5 / ax_dx
+             else:
+                 dx1 = ax_dx[-2]
+                 dx2 = ax_dx[-1]
+                 a = (dx2) / (dx1 * (dx1 + dx2))
+                 b = -(dx2 + dx1) / (dx1 * dx2)
+                 c = (2.0 * dx2 + dx1) / (dx2 * (dx1 + dx2))
+             # 1D equivalent -- out[-1] = a * f[-3] + b * f[-2] + c * f[-1]
+             out[tuple(slice1)] = (
+                 a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)]
+             )
+
+         outvals.append(out)
+
+         # reset the slice object in this dimension to ":"
+         slice1[axis] = slice(None)
+         slice2[axis] = slice(None)
+         slice3[axis] = slice(None)
+         slice4[axis] = slice(None)
+
+     if len_axes == 1:
+         return outvals[0]
+     else:
+         return outvals
+
+
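+ # Worked 1-D example (illustrative sketch): with unit spacing, interior points
+ # get central differences and the two endpoints get one-sided differences.
+ #
+ #   >>> gradient(torch.tensor([1.0, 2.0, 4.0, 7.0]))
+ #   tensor([1.0000, 1.5000, 2.5000, 3.0000])
+
+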
+ # ### Type/shape etc queries ###
+
+
+ def round(a: ArrayLike, decimals=0, out: Optional[OutArray] = None):
+     if a.is_floating_point():
+         result = torch.round(a, decimals=decimals)
+     elif a.is_complex():
+         # RuntimeError: "round_cpu" not implemented for 'ComplexFloat'
+         result = torch.complex(
+             torch.round(a.real, decimals=decimals),
+             torch.round(a.imag, decimals=decimals),
+         )
+     else:
+         # RuntimeError: "round_cpu" not implemented for 'int'
+         result = a
+     return result
+
+
+ around = round
+ round_ = round
+
+
+ def real_if_close(a: ArrayLike, tol=100):
+     if not torch.is_complex(a):
+         return a
+     if tol > 1:
+         # Undocumented in numpy: if tol < 1, it's an absolute tolerance!
+         # Otherwise, tol > 1 is relative tolerance, in units of the dtype epsilon
+         # https://github.com/numpy/numpy/blob/v1.24.0/numpy/lib/type_check.py#L577
+         tol = tol * torch.finfo(a.dtype).eps
+
+     mask = torch.abs(a.imag) < tol
+     return a.real if mask.all() else a
+
+
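+ # Sketch of the tolerance convention above: the default tol=100 means
+ # "100 machine epsilons of the dtype", so tiny imaginary parts are dropped.
+ #
+ #   >>> real_if_close(torch.tensor([2.1 + 1e-15j], dtype=torch.complex128))
+ #   tensor([2.1000], dtype=torch.float64)
+
+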
+ def real(a: ArrayLike):
+     return torch.real(a)
+
+
+ def imag(a: ArrayLike):
+     if a.is_complex():
+         return a.imag
+     return torch.zeros_like(a)
+
+
+ def iscomplex(x: ArrayLike):
+     if torch.is_complex(x):
+         return x.imag != 0
+     return torch.zeros_like(x, dtype=torch.bool)
+
+
+ def isreal(x: ArrayLike):
+     if torch.is_complex(x):
+         return x.imag == 0
+     return torch.ones_like(x, dtype=torch.bool)
+
+
+ def iscomplexobj(x: ArrayLike):
+     return torch.is_complex(x)
+
+
+ def isrealobj(x: ArrayLike):
+     return not torch.is_complex(x)
+
+
+ def isneginf(x: ArrayLike, out: Optional[OutArray] = None):
+     return torch.isneginf(x)
+
+
+ def isposinf(x: ArrayLike, out: Optional[OutArray] = None):
+     return torch.isposinf(x)
+
+
+ def i0(x: ArrayLike):
+     return torch.special.i0(x)
+
+
+ def isscalar(a):
+     # We need to use normalize_array_like, but we don't want to export it in funcs.py
+     from ._normalizations import normalize_array_like
+
+     try:
+         t = normalize_array_like(a)
+         return t.numel() == 1
+     except Exception:
+         return False
+
+
+ # ### Filter windows ###
+
+
+ def hamming(M):
+     dtype = _dtypes_impl.default_dtypes().float_dtype
+     return torch.hamming_window(M, periodic=False, dtype=dtype)
+
+
+ def hanning(M):
+     dtype = _dtypes_impl.default_dtypes().float_dtype
+     return torch.hann_window(M, periodic=False, dtype=dtype)
+
+
+ def kaiser(M, beta):
+     dtype = _dtypes_impl.default_dtypes().float_dtype
+     return torch.kaiser_window(M, beta=beta, periodic=False, dtype=dtype)
+
+
+ def blackman(M):
+     dtype = _dtypes_impl.default_dtypes().float_dtype
+     return torch.blackman_window(M, periodic=False, dtype=dtype)
+
+
+ def bartlett(M):
+     dtype = _dtypes_impl.default_dtypes().float_dtype
+     return torch.bartlett_window(M, periodic=False, dtype=dtype)
+
+
+ # ### Dtype routines ###
+
+ # vendored from https://github.com/numpy/numpy/blob/v1.24.0/numpy/lib/type_check.py#L666
+
+
+ array_type = [
+     [torch.float16, torch.float32, torch.float64],
+     [None, torch.complex64, torch.complex128],
+ ]
+ array_precision = {
+     torch.float16: 0,
+     torch.float32: 1,
+     torch.float64: 2,
+     torch.complex64: 1,
+     torch.complex128: 2,
+ }
+
+
+ def common_type(*tensors: ArrayLike):
+     is_complex = False
+     precision = 0
+     for a in tensors:
+         t = a.dtype
+         if iscomplexobj(a):
+             is_complex = True
+         if not (t.is_floating_point or t.is_complex):
+             p = 2  # array_precision[_nx.double]
+         else:
+             p = array_precision.get(t, None)
+             if p is None:
+                 raise TypeError("can't get common type for non-numeric array")
+         precision = builtins.max(precision, p)
+     if is_complex:
+         return array_type[1][precision]
+     else:
+         return array_type[0][precision]
+
+
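+ # Precision bookkeeping above, sketched at the impl level (tensor inputs,
+ # torch dtypes returned): integer inputs count as double precision, and
+ # complexness is tracked separately from precision.
+ #
+ #   >>> common_type(torch.arange(3))                        # integer -> float64
+ #   torch.float64
+ #   >>> common_type(torch.zeros(3, dtype=torch.complex64))
+ #   torch.complex64
+
+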
+ # ### histograms ###
+
+
+ def histogram(
+     a: ArrayLike,
+     bins: ArrayLike = 10,
+     range=None,
+     normed=None,
+     weights: Optional[ArrayLike] = None,
+     density=None,
+ ):
+     if normed is not None:
+         raise ValueError("normed argument is deprecated, use density= instead")
+
+     if weights is not None and weights.dtype.is_complex:
+         raise NotImplementedError("complex weights histogram.")
+
+     is_a_int = not (a.dtype.is_floating_point or a.dtype.is_complex)
+     is_w_int = weights is None or not weights.dtype.is_floating_point
+     if is_a_int:
+         a = a.double()
+
+     if weights is not None:
+         weights = _util.cast_if_needed(weights, a.dtype)
+
+     if isinstance(bins, torch.Tensor):
+         if bins.ndim == 0:
+             # bins was a single int
+             bins = operator.index(bins)
+         else:
+             bins = _util.cast_if_needed(bins, a.dtype)
+
+     if range is None:
+         h, b = torch.histogram(a, bins, weight=weights, density=bool(density))
+     else:
+         h, b = torch.histogram(
+             a, bins, range=range, weight=weights, density=bool(density)
+         )
+
+     if not density and is_w_int:
+         h = h.long()
+     if is_a_int:
+         b = b.long()
+
+     return h, b
+
+
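+ # Integer round-tripping above, sketched: integer samples are histogrammed in
+ # float64, then the counts (and, for integer input, the bin edges) are cast back.
+ #
+ #   >>> h, b = histogram(torch.tensor([1, 2, 1]), bins=torch.tensor([0.0, 1.0, 2.0, 3.0]))
+ #   >>> h
+ #   tensor([0, 2, 1])    # int64 counts; the last bin includes its right edge
+
+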
+ def histogram2d(
+     x,
+     y,
+     bins=10,
+     range: Optional[ArrayLike] = None,
+     normed=None,
+     weights: Optional[ArrayLike] = None,
+     density=None,
+ ):
+     # vendored from https://github.com/numpy/numpy/blob/v1.24.0/numpy/lib/twodim_base.py#L655-L821
+     if len(x) != len(y):
+         raise ValueError("x and y must have the same length.")
+
+     try:
+         N = len(bins)
+     except TypeError:
+         N = 1
+
+     if N != 1 and N != 2:
+         bins = [bins, bins]
+
+     h, e = histogramdd((x, y), bins, range, normed, weights, density)
+
+     return h, e[0], e[1]
+
+
+ def histogramdd(
+     sample,
+     bins=10,
+     range: Optional[ArrayLike] = None,
+     normed=None,
+     weights: Optional[ArrayLike] = None,
+     density=None,
+ ):
+     # have to normalize manually because `sample` interpretation differs
+     # for a list of lists and a 2D array
+     if normed is not None:
+         raise ValueError("normed argument is deprecated, use density= instead")
+
+     from ._normalizations import normalize_array_like, normalize_seq_array_like
+
+     if isinstance(sample, (list, tuple)):
+         sample = normalize_array_like(sample).T
+     else:
+         sample = normalize_array_like(sample)
+
+     sample = torch.atleast_2d(sample)
+
+     if not (sample.dtype.is_floating_point or sample.dtype.is_complex):
+         sample = sample.double()
+
+     # bins is either an int, or a sequence of ints or a sequence of arrays
+     bins_is_array = not (
+         isinstance(bins, int) or builtins.all(isinstance(b, int) for b in bins)
+     )
+     if bins_is_array:
+         bins = normalize_seq_array_like(bins)
+         bins_dtypes = [b.dtype for b in bins]
+         bins = [_util.cast_if_needed(b, sample.dtype) for b in bins]
+
+     if range is not None:
+         range = range.flatten().tolist()
+
+     if weights is not None:
+         # range=... is required : interleave min and max values per dimension
+         mm = sample.aminmax(dim=0)
+         range = torch.cat(mm).reshape(2, -1).T.flatten()
+         range = tuple(range.tolist())
+         weights = _util.cast_if_needed(weights, sample.dtype)
+         w_kwd = {"weight": weights}
+     else:
+         w_kwd = {}
+
+     h, b = torch.histogramdd(sample, bins, range, density=bool(density), **w_kwd)
+
+     if bins_is_array:
+         b = [_util.cast_if_needed(bb, dtyp) for bb, dtyp in zip(b, bins_dtypes)]
+
+     return h, b
+
+
+ # ### odds and ends
+
+
+ def min_scalar_type(a: ArrayLike, /):
+     # https://github.com/numpy/numpy/blob/maintenance/1.24.x/numpy/core/src/multiarray/convert_datatype.c#L1288
+
+     from ._dtypes import DType
+
+     if a.numel() > 1:
+         # numpy docs: "For non-scalar array a, returns the vector's dtype unmodified."
+         return DType(a.dtype)
+
+     if a.dtype == torch.bool:
+         dtype = torch.bool
+
+     elif a.dtype.is_complex:
+         fi = torch.finfo(torch.float32)
+         fits_in_single = a.dtype == torch.complex64 or (
+             fi.min <= a.real <= fi.max and fi.min <= a.imag <= fi.max
+         )
+         dtype = torch.complex64 if fits_in_single else torch.complex128
+
+     elif a.dtype.is_floating_point:
+         for dt in [torch.float16, torch.float32, torch.float64]:
+             fi = torch.finfo(dt)
+             if fi.min <= a <= fi.max:
+                 dtype = dt
+                 break
+     else:
+         # must be integer
+         for dt in [torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64]:
+             # Prefer unsigned int where possible, as numpy does.
+             ii = torch.iinfo(dt)
+             if ii.min <= a <= ii.max:
+                 dtype = dt
+                 break
+
+     return DType(dtype)
+
+
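+ # Scalar narrowing above, sketched: the smallest dtype whose range contains the
+ # value wins, preferring unsigned for non-negative integers, as in NumPy.
+ #
+ #   >>> min_scalar_type(torch.tensor(10)).name
+ #   'uint8'
+ #   >>> min_scalar_type(torch.tensor(-1)).name
+ #   'int8'
+
+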
+ def pad(array: ArrayLike, pad_width: ArrayLike, mode="constant", **kwargs):
+     if mode != "constant":
+         raise NotImplementedError
+     value = kwargs.get("constant_values", 0)
+     # `value` must be a python scalar for torch.nn.functional.pad
+     typ = _dtypes_impl.python_type_for_torch(array.dtype)
+     value = typ(value)
+
+     pad_width = torch.broadcast_to(pad_width, (array.ndim, 2))
+     pad_width = torch.flip(pad_width, (0,)).flatten()
+
+     return torch.nn.functional.pad(array, tuple(pad_width), value=value)
parrot/lib/python3.10/site-packages/torch/_numpy/_getlimits.py ADDED
@@ -0,0 +1,15 @@
+ # mypy: ignore-errors
+
+ import torch
+
+ from . import _dtypes
+
+
+ def finfo(dtyp):
+     torch_dtype = _dtypes.dtype(dtyp).torch_dtype
+     return torch.finfo(torch_dtype)
+
+
+ def iinfo(dtyp):
+     torch_dtype = _dtypes.dtype(dtyp).torch_dtype
+     return torch.iinfo(torch_dtype)
parrot/lib/python3.10/site-packages/torch/_numpy/_ndarray.py ADDED
@@ -0,0 +1,591 @@
+ # mypy: ignore-errors
+
+ from __future__ import annotations
+
+ import builtins
+ import math
+ import operator
+ from typing import Sequence
+
+ import torch
+
+ from . import _dtypes, _dtypes_impl, _funcs, _ufuncs, _util
+ from ._normalizations import (
+     ArrayLike,
+     normalize_array_like,
+     normalizer,
+     NotImplementedType,
+ )
+
+ newaxis = None
+
+ FLAGS = [
+     "C_CONTIGUOUS",
+     "F_CONTIGUOUS",
+     "OWNDATA",
+     "WRITEABLE",
+     "ALIGNED",
+     "WRITEBACKIFCOPY",
+     "FNC",
+     "FORC",
+     "BEHAVED",
+     "CARRAY",
+     "FARRAY",
+ ]
+
+ SHORTHAND_TO_FLAGS = {
+     "C": "C_CONTIGUOUS",
+     "F": "F_CONTIGUOUS",
+     "O": "OWNDATA",
+     "W": "WRITEABLE",
+     "A": "ALIGNED",
+     "X": "WRITEBACKIFCOPY",
+     "B": "BEHAVED",
+     "CA": "CARRAY",
+     "FA": "FARRAY",
+ }
+
+
+ class Flags:
+     def __init__(self, flag_to_value: dict):
+         assert all(k in FLAGS for k in flag_to_value.keys())  # sanity check
+         self._flag_to_value = flag_to_value
+
+     def __getattr__(self, attr: str):
+         if attr.islower() and attr.upper() in FLAGS:
+             return self[attr.upper()]
+         else:
+             raise AttributeError(f"No flag attribute '{attr}'")
+
+     def __getitem__(self, key):
+         if key in SHORTHAND_TO_FLAGS.keys():
+             key = SHORTHAND_TO_FLAGS[key]
+         if key in FLAGS:
+             try:
+                 return self._flag_to_value[key]
+             except KeyError as e:
+                 raise NotImplementedError(f"{key=}") from e
+         else:
+             raise KeyError(f"No flag key '{key}'")
+
+     def __setattr__(self, attr, value):
+         if attr.islower() and attr.upper() in FLAGS:
+             self[attr.upper()] = value
+         else:
+             super().__setattr__(attr, value)
+
+     def __setitem__(self, key, value):
+         if key in FLAGS or key in SHORTHAND_TO_FLAGS.keys():
+             raise NotImplementedError("Modifying flags is not implemented")
+         else:
+             raise KeyError(f"No flag key '{key}'")
+
+
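+ # Flags lookups support full names, attribute-style lowercase names, and NumPy's
+ # single-letter shorthands; only the four flags populated in ndarray.flags are
+ # actually available. Illustrative sketch:
+ #
+ #   >>> a = asarray([1, 2, 3])
+ #   >>> a.flags["C"], a.flags.c_contiguous, a.flags["WRITEABLE"]
+ #   (True, True, True)
+
+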
+ def create_method(fn, name=None):
+     name = name or fn.__name__
+
+     def f(*args, **kwargs):
+         return fn(*args, **kwargs)
+
+     f.__name__ = name
+     f.__qualname__ = f"ndarray.{name}"
+     return f
+
+
+ # Map ndarray.name_method -> np.name_func
+ # If name_func == None, it means that name_method == name_func
+ methods = {
+     "clip": None,
+     "nonzero": None,
+     "repeat": None,
+     "round": None,
+     "squeeze": None,
+     "swapaxes": None,
+     "ravel": None,
+     # linalg
+     "diagonal": None,
+     "dot": None,
+     "trace": None,
+     # sorting
+     "argsort": None,
+     "searchsorted": None,
+     # reductions
+     "argmax": None,
+     "argmin": None,
+     "any": None,
+     "all": None,
+     "max": None,
+     "min": None,
+     "ptp": None,
+     "sum": None,
+     "prod": None,
+     "mean": None,
+     "var": None,
+     "std": None,
+     # scans
+     "cumsum": None,
+     "cumprod": None,
+     # advanced indexing
+     "take": None,
+     "choose": None,
+ }
+
+ dunder = {
+     "abs": "absolute",
+     "invert": None,
+     "pos": "positive",
+     "neg": "negative",
+     "gt": "greater",
+     "lt": "less",
+     "ge": "greater_equal",
+     "le": "less_equal",
+ }
+
+ # dunder methods with right-looking and in-place variants
+ ri_dunder = {
+     "add": None,
+     "sub": "subtract",
+     "mul": "multiply",
+     "truediv": "divide",
+     "floordiv": "floor_divide",
+     "pow": "power",
+     "mod": "remainder",
+     "and": "bitwise_and",
+     "or": "bitwise_or",
+     "xor": "bitwise_xor",
+     "lshift": "left_shift",
+     "rshift": "right_shift",
+     "matmul": None,
+ }
+
+
+ def _upcast_int_indices(index):
+     if isinstance(index, torch.Tensor):
+         if index.dtype in (torch.int8, torch.int16, torch.int32, torch.uint8):
+             return index.to(torch.int64)
+     elif isinstance(index, tuple):
+         return tuple(_upcast_int_indices(i) for i in index)
+     return index
+
+
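+ # Why the upcast (assumed rationale): torch's advanced indexing historically
+ # requires int64 (long) index tensors, and uint8 indices are treated as
+ # (deprecated) boolean masks, so small-int indices are widened. Sketch:
+ #
+ #   >>> _upcast_int_indices(torch.tensor([0, 2], dtype=torch.int16)).dtype
+ #   torch.int64
+
+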
+ # Used to indicate that a parameter is unspecified (as opposed to explicitly
+ # `None`)
+ class _Unspecified:
+     pass
+
+
+ _Unspecified.unspecified = _Unspecified()
+
+ ###############################################################
+ #                       ndarray class                         #
+ ###############################################################
+
+
+ class ndarray:
+     def __init__(self, t=None):
+         if t is None:
+             self.tensor = torch.Tensor()
+         elif isinstance(t, torch.Tensor):
+             self.tensor = t
+         else:
+             raise ValueError(
+                 "ndarray constructor is not recommended; prefer "
+                 "either array(...) or zeros/empty(...)"
+             )
+
+     # Register NumPy functions as methods
+     for method, name in methods.items():
+         fn = getattr(_funcs, name or method)
+         vars()[method] = create_method(fn, method)
+
+     # Regular methods but coming from ufuncs
+     conj = create_method(_ufuncs.conjugate, "conj")
+     conjugate = create_method(_ufuncs.conjugate)
+
+     for method, name in dunder.items():
+         fn = getattr(_ufuncs, name or method)
+         method = f"__{method}__"
+         vars()[method] = create_method(fn, method)
+
+     for method, name in ri_dunder.items():
+         fn = getattr(_ufuncs, name or method)
+         plain = f"__{method}__"
+         vars()[plain] = create_method(fn, plain)
+         rvar = f"__r{method}__"
+         vars()[rvar] = create_method(lambda self, other, fn=fn: fn(other, self), rvar)
+         ivar = f"__i{method}__"
+         vars()[ivar] = create_method(
+             lambda self, other, fn=fn: fn(self, other, out=self), ivar
+         )
+
+     # There's no __idivmod__
+     __divmod__ = create_method(_ufuncs.divmod, "__divmod__")
+     __rdivmod__ = create_method(
+         lambda self, other: _ufuncs.divmod(other, self), "__rdivmod__"
+     )
+
+     # prevent loop variables leaking into the ndarray class namespace
+     del ivar, rvar, name, plain, fn, method
+
+     @property
+     def shape(self):
+         return tuple(self.tensor.shape)
+
+     @property
+     def size(self):
+         return self.tensor.numel()
+
+     @property
+     def ndim(self):
+         return self.tensor.ndim
+
+     @property
+     def dtype(self):
+         return _dtypes.dtype(self.tensor.dtype)
+
+     @property
+     def strides(self):
+         elsize = self.tensor.element_size()
+         return tuple(stride * elsize for stride in self.tensor.stride())
+
+     @property
+     def itemsize(self):
+         return self.tensor.element_size()
+
+     @property
+     def flags(self):
+         # Note contiguous in torch is assumed C-style
+         return Flags(
+             {
+                 "C_CONTIGUOUS": self.tensor.is_contiguous(),
+                 "F_CONTIGUOUS": self.T.tensor.is_contiguous(),
+                 "OWNDATA": self.tensor._base is None,
+                 "WRITEABLE": True,  # pytorch does not have readonly tensors
+             }
+         )
+
+     @property
+     def data(self):
+         return self.tensor.data_ptr()
+
+     @property
+     def nbytes(self):
+         return self.tensor.storage().nbytes()
+
+     @property
+     def T(self):
+         return self.transpose()
+
+     @property
+     def real(self):
+         return _funcs.real(self)
+
+     @real.setter
+     def real(self, value):
+         self.tensor.real = asarray(value).tensor
+
+     @property
+     def imag(self):
+         return _funcs.imag(self)
+
+     @imag.setter
+     def imag(self, value):
+         self.tensor.imag = asarray(value).tensor
+
+     # ctors
+     def astype(self, dtype, order="K", casting="unsafe", subok=True, copy=True):
+         if order != "K":
+             raise NotImplementedError(f"astype(..., order={order}) is not implemented.")
+         if casting != "unsafe":
+             raise NotImplementedError(
+                 f"astype(..., casting={casting}) is not implemented."
+             )
+         if not subok:
+             raise NotImplementedError(f"astype(..., subok={subok}) is not implemented.")
+         if not copy:
+             raise NotImplementedError(f"astype(..., copy={copy}) is not implemented.")
+         torch_dtype = _dtypes.dtype(dtype).torch_dtype
+         t = self.tensor.to(torch_dtype)
+         return ndarray(t)
+
+     @normalizer
+     def copy(self: ArrayLike, order: NotImplementedType = "C"):
+         return self.clone()
+
+     @normalizer
+     def flatten(self: ArrayLike, order: NotImplementedType = "C"):
+         return torch.flatten(self)
+
+     def resize(self, *new_shape, refcheck=False):
+         # NB: differs from np.resize: fills with zeros instead of making repeated copies of input.
+         if refcheck:
+             raise NotImplementedError(
+                 f"resize(..., refcheck={refcheck}) is not implemented."
+             )
+         if new_shape in [(), (None,)]:
+             return
+
+         # support both x.resize((2, 2)) and x.resize(2, 2)
+         if len(new_shape) == 1:
+             new_shape = new_shape[0]
+         if isinstance(new_shape, int):
+             new_shape = (new_shape,)
+
+         if builtins.any(x < 0 for x in new_shape):
+             raise ValueError("all elements of `new_shape` must be non-negative")
+
+         new_numel, old_numel = math.prod(new_shape), self.tensor.numel()
+
+         self.tensor.resize_(new_shape)
+
+         if new_numel >= old_numel:
+             # zero-fill new elements
+             assert self.tensor.is_contiguous()
+             b = self.tensor.flatten()  # does not copy
+             b[old_numel:].zero_()
+
+     def view(self, dtype=_Unspecified.unspecified, type=_Unspecified.unspecified):
+         if dtype is _Unspecified.unspecified:
+             dtype = self.dtype
+         if type is not _Unspecified.unspecified:
+             raise NotImplementedError(f"view(..., type={type}) is not implemented.")
+         torch_dtype = _dtypes.dtype(dtype).torch_dtype
+         tview = self.tensor.view(torch_dtype)
+         return ndarray(tview)
+
+     @normalizer
+     def fill(self, value: ArrayLike):
+         # Both Pytorch and NumPy accept 0D arrays/tensors and scalars, and
+         # error out on D > 0 arrays
+         self.tensor.fill_(value)
+
+     def tolist(self):
+         return self.tensor.tolist()
+
+     def __iter__(self):
+         return (ndarray(x) for x in self.tensor.__iter__())
+
+     def __str__(self):
+         return (
+             str(self.tensor)
+             .replace("tensor", "torch.ndarray")
+             .replace("dtype=torch.", "dtype=")
+         )
+
+     __repr__ = create_method(__str__)
+
+     def __eq__(self, other):
+         try:
+             return _ufuncs.equal(self, other)
+         except (RuntimeError, TypeError):
+             # Failed to convert other to array: definitely not equal.
+             falsy = torch.full(self.shape, fill_value=False, dtype=bool)
+             return asarray(falsy)
+
+     def __ne__(self, other):
+         return ~(self == other)
+
+     def __index__(self):
+         try:
+             return operator.index(self.tensor.item())
+         except Exception as exc:
+             raise TypeError(
+                 "only integer scalar arrays can be converted to a scalar index"
+             ) from exc
+
+     def __bool__(self):
+         return bool(self.tensor)
+
+     def __int__(self):
+         return int(self.tensor)
+
+     def __float__(self):
+         return float(self.tensor)
+
+     def __complex__(self):
+         return complex(self.tensor)
+
+     def is_integer(self):
+         try:
+             v = self.tensor.item()
+             result = int(v) == v
+         except Exception:
+             result = False
+         return result
+
+     def __len__(self):
+         return self.tensor.shape[0]
+
+     def __contains__(self, x):
+         return self.tensor.__contains__(x)
+
+     def transpose(self, *axes):
+         # np.transpose(arr, axis=None) but arr.transpose(*axes)
+         return _funcs.transpose(self, axes)
+
+     def reshape(self, *shape, order="C"):
+         # arr.reshape(shape) and arr.reshape(*shape)
+         return _funcs.reshape(self, shape, order=order)
+
+     def sort(self, axis=-1, kind=None, order=None):
+         # ndarray.sort works in-place
+         _funcs.copyto(self, _funcs.sort(self, axis, kind, order))
+
+     def item(self, *args):
+         # Mimic NumPy's implementation with three special cases (no arguments,
+         # a flat index and a multi-index):
+         # https://github.com/numpy/numpy/blob/main/numpy/core/src/multiarray/methods.c#L702
+         if args == ():
+             return self.tensor.item()
+         elif len(args) == 1:
+             # int argument
+             return self.ravel()[args[0]]
+         else:
+             return self.__getitem__(args)
+
+     def __getitem__(self, index):
+         tensor = self.tensor
+
+         def neg_step(i, s):
+             if not (isinstance(s, slice) and s.step is not None and s.step < 0):
+                 return s
+
+             nonlocal tensor
+             tensor = torch.flip(tensor, (i,))
+
+             # Account for the fact that a slice includes the start but not the end
+             assert isinstance(s.start, int) or s.start is None
+             assert isinstance(s.stop, int) or s.stop is None
+             start = s.stop + 1 if s.stop else None
+             stop = s.start + 1 if s.start else None
+
+             return slice(start, stop, -s.step)
+
+         if isinstance(index, Sequence):
+             index = type(index)(neg_step(i, s) for i, s in enumerate(index))
+         else:
+             index = neg_step(0, index)
+         index = _util.ndarrays_to_tensors(index)
+         index = _upcast_int_indices(index)
+         return ndarray(tensor.__getitem__(index))
+
+     def __setitem__(self, index, value):
+         index = _util.ndarrays_to_tensors(index)
+         index = _upcast_int_indices(index)
+
+         if not _dtypes_impl.is_scalar(value):
+             value = normalize_array_like(value)
+             value = _util.cast_if_needed(value, self.tensor.dtype)
+
+         return self.tensor.__setitem__(index, value)
+
+     take = _funcs.take
+     put = _funcs.put
+
+     def __dlpack__(self, *, stream=None):
+         return self.tensor.__dlpack__(stream=stream)
+
+     def __dlpack_device__(self):
+         return self.tensor.__dlpack_device__()
+
+
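+ # Negative-step slicing, which torch does not support directly, is emulated in
+ # __getitem__ above by flipping the tensor along that axis and rewriting the
+ # slice bounds. Illustrative sketch:
+ #
+ #   >>> asarray([1, 2, 3, 4])[::-1]
+ #   torch.ndarray([4, 3, 2, 1])
+
+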
+ def _tolist(obj):
+     """Recursively convert tensors into lists."""
+     a1 = []
+     for elem in obj:
+         if isinstance(elem, (list, tuple)):
+             elem = _tolist(elem)
+         if isinstance(elem, ndarray):
+             a1.append(elem.tensor.tolist())
+         else:
+             a1.append(elem)
+     return a1
+
+
+ # This is ideally the only place which talks to ndarray directly.
+ # The rest goes through asarray (preferred) or array.
+
+
+ def array(obj, dtype=None, *, copy=True, order="K", subok=False, ndmin=0, like=None):
+     if subok is not False:
+         raise NotImplementedError("'subok' parameter is not supported.")
+     if like is not None:
+         raise NotImplementedError("'like' parameter is not supported.")
+     if order != "K":
+         raise NotImplementedError
+
+     # a happy path
+     if (
+         isinstance(obj, ndarray)
+         and copy is False
+         and dtype is None
+         and ndmin <= obj.ndim
+     ):
+         return obj
+
+     if isinstance(obj, (list, tuple)):
+         # FIXME and they have the same dtype, device, etc
+         if obj and all(isinstance(x, torch.Tensor) for x in obj):
+             # list of arrays: *under torch.Dynamo* these are FakeTensors
+             obj = torch.stack(obj)
+         else:
+             # XXX: remove tolist
+             # lists of ndarrays: [1, [2, 3], ndarray(4)] convert to lists of lists
+             obj = _tolist(obj)
+
+     # is obj an ndarray already?
+     if isinstance(obj, ndarray):
+         obj = obj.tensor
+
+     # is a specific dtype requested?
+     torch_dtype = None
+     if dtype is not None:
+         torch_dtype = _dtypes.dtype(dtype).torch_dtype
+
+     tensor = _util._coerce_to_tensor(obj, torch_dtype, copy, ndmin)
+     return ndarray(tensor)
+
+
+ def asarray(a, dtype=None, order="K", *, like=None):
+     return array(a, dtype=dtype, order=order, like=like, copy=False, ndmin=0)
+
+
+ def ascontiguousarray(a, dtype=None, *, like=None):
+     arr = asarray(a, dtype=dtype, like=like)
+     if not arr.tensor.is_contiguous():
+         arr.tensor = arr.tensor.contiguous()
+     return arr
+
+
+ def from_dlpack(x, /):
+     t = torch.from_dlpack(x)
+     return ndarray(t)
+
+
+ def _extract_dtype(entry):
+     try:
+         dty = _dtypes.dtype(entry)
+     except Exception:
+         dty = asarray(entry).dtype
+     return dty
+
+
+ def can_cast(from_, to, casting="safe"):
+     from_ = _extract_dtype(from_)
+     to_ = _extract_dtype(to)
+
+     return _dtypes_impl.can_cast_impl(from_.torch_dtype, to_.torch_dtype, casting)
+
+
+ def result_type(*arrays_and_dtypes):
+     tensors = []
+     for entry in arrays_and_dtypes:
+         try:
+             t = asarray(entry).tensor
+         except (RuntimeError, ValueError, TypeError):
+             dty = _dtypes.dtype(entry)
+             t = torch.empty(1, dtype=dty.torch_dtype)
+         tensors.append(t)
+
+     torch_dtype = _dtypes_impl.result_type_impl(*tensors)
+     return _dtypes.dtype(torch_dtype)
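+
+
+ # asarray is the no-copy entry point: for an existing ndarray with no dtype
+ # change it hits the "happy path" in array() above and returns the object
+ # itself. Illustrative sketch:
+ #
+ #   >>> a = asarray([1, 2, 3])
+ #   >>> asarray(a) is a
+ #   True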
parrot/lib/python3.10/site-packages/torch/_numpy/_normalizations.py ADDED
@@ -0,0 +1,258 @@
+ # mypy: ignore-errors
+
+ """ "Normalize" arguments: convert array_likes to tensors, dtypes to torch dtypes and so on.
+ """
+ from __future__ import annotations
+
+ import functools
+ import inspect
+ import operator
+ import typing
+
+ import torch
+
+ from . import _dtypes, _dtypes_impl, _util
+
+ ArrayLike = typing.TypeVar("ArrayLike")
+ Scalar = typing.Union[int, float, complex, bool]
+ ArrayLikeOrScalar = typing.Union[ArrayLike, Scalar]
+
+ DTypeLike = typing.TypeVar("DTypeLike")
+ AxisLike = typing.TypeVar("AxisLike")
+ NDArray = typing.TypeVar("NDArray")
+ CastingModes = typing.TypeVar("CastingModes")
+ KeepDims = typing.TypeVar("KeepDims")
+
+ # OutArray is to annotate the out= array argument.
+ #
+ # This one is special in several respects:
+ # First, it needs to be an NDArray, and we need to preserve the `result is out`
+ # semantics. Therefore, we cannot just extract the Tensor from the out array.
+ # So we never pass the out array to implementer functions and handle it in the
+ # `normalizer` below.
+ # Second, the out= argument can be either a keyword or a positional argument, and
+ # as a positional arg, it can be anywhere in the signature.
+ # To handle all this, we define a special `OutArray` annotation and dispatch on it.
+ #
+ OutArray = typing.TypeVar("OutArray")
+
+ try:
+     from typing import NotImplementedType
+ except ImportError:
+     NotImplementedType = typing.TypeVar("NotImplementedType")
+
+
45
+ def normalize_array_like(x, parm=None):
46
+ from ._ndarray import asarray
47
+
48
+ return asarray(x).tensor
49
+
50
+
51
+ def normalize_array_like_or_scalar(x, parm=None):
52
+ if _dtypes_impl.is_scalar_or_symbolic(x):
53
+ return x
54
+ return normalize_array_like(x, parm)
55
+
56
+
57
+ def normalize_optional_array_like_or_scalar(x, parm=None):
58
+ if x is None:
59
+ return None
60
+ return normalize_array_like_or_scalar(x, parm)
61
+
62
+
63
+ def normalize_optional_array_like(x, parm=None):
64
+ # This explicit normalizer is needed because otherwise normalize_array_like
65
+ # does not run for a parameter annotated as Optional[ArrayLike]
66
+ return None if x is None else normalize_array_like(x, parm)
67
+
68
+
69
+ def normalize_seq_array_like(x, parm=None):
70
+ return tuple(normalize_array_like(value) for value in x)
71
+
72
+
73
+ def normalize_dtype(dtype, parm=None):
74
+ # cf _decorators.dtype_to_torch
75
+ torch_dtype = None
76
+ if dtype is not None:
77
+ dtype = _dtypes.dtype(dtype)
78
+ torch_dtype = dtype.torch_dtype
79
+ return torch_dtype
80
+
81
+
82
+ def normalize_not_implemented(arg, parm):
83
+ if arg != parm.default:
84
+ raise NotImplementedError(f"'{parm.name}' parameter is not supported.")
85
+
86
+
87
+ def normalize_axis_like(arg, parm=None):
88
+ from ._ndarray import ndarray
89
+
90
+ if isinstance(arg, ndarray):
91
+ arg = operator.index(arg)
92
+ return arg
93
+
94
+
95
+ def normalize_ndarray(arg, parm=None):
96
+ # check the arg is an ndarray, extract its tensor attribute
97
+ if arg is None:
98
+ return arg
99
+
100
+ from ._ndarray import ndarray
101
+
102
+ if not isinstance(arg, ndarray):
103
+ raise TypeError(f"'{parm.name}' must be an array")
104
+ return arg.tensor
105
+
106
+
107
+ def normalize_outarray(arg, parm=None):
108
+ # like normalize_ndarray, but returns the array itself, not its tensor
109
+ if arg is None:
110
+ return arg
111
+ from ._ndarray import ndarray
112
+
113
+ # Dynamo can pass torch tensors as out arguments,
114
+ # wrap it in an ndarray before processing
115
+ if isinstance(arg, torch.Tensor):
116
+ arg = ndarray(arg)
117
+
118
+ if not isinstance(arg, ndarray):
119
+ raise TypeError(f"'{parm.name}' must be an array")
120
+ return arg
121
+
122
+
123
+ def normalize_casting(arg, parm=None):
124
+ if arg not in ["no", "equiv", "safe", "same_kind", "unsafe"]:
125
+ raise ValueError(
126
+ f"casting must be one of 'no', 'equiv', 'safe', 'same_kind', or 'unsafe' (got '{arg}')"
127
+ )
128
+ return arg
129
+
130
+
131
+ normalizers = {
132
+ "ArrayLike": normalize_array_like,
133
+ "ArrayLikeOrScalar": normalize_array_like_or_scalar,
134
+ "Optional[ArrayLike]": normalize_optional_array_like,
135
+ "Sequence[ArrayLike]": normalize_seq_array_like,
136
+ "Optional[ArrayLikeOrScalar]": normalize_optional_array_like_or_scalar,
137
+ "Optional[NDArray]": normalize_ndarray,
138
+ "Optional[OutArray]": normalize_outarray,
139
+ "NDArray": normalize_ndarray,
140
+ "Optional[DTypeLike]": normalize_dtype,
141
+ "AxisLike": normalize_axis_like,
142
+ "NotImplementedType": normalize_not_implemented,
143
+ "Optional[CastingModes]": normalize_casting,
144
+ }
145
+
146
+
147
+ def maybe_normalize(arg, parm):
148
+ """Normalize arg if a normalizer is registered."""
149
+ normalizer = normalizers.get(parm.annotation, None)
150
+ return normalizer(arg, parm) if normalizer else arg
151
+
152
+
153
+ # ### Return value helpers ###
154
+
155
+
156
+ def maybe_copy_to(out, result, promote_scalar_result=False):
157
+ # NB: here out is either an ndarray or None
158
+ if out is None:
159
+ return result
160
+ elif isinstance(result, torch.Tensor):
161
+ if result.shape != out.shape:
162
+ can_fit = result.numel() == 1 and out.ndim == 0
163
+ if promote_scalar_result and can_fit:
164
+ result = result.squeeze()
165
+ else:
166
+ raise ValueError(
167
+ f"Bad size of the out array: out.shape = {out.shape}"
168
+ f" while result.shape = {result.shape}."
169
+ )
170
+ out.tensor.copy_(result)
171
+ return out
172
+ elif isinstance(result, (tuple, list)):
173
+ return type(result)(
174
+ maybe_copy_to(o, r, promote_scalar_result) for o, r in zip(out, result)
175
+ )
176
+ else:
177
+ raise AssertionError # We should never hit this path
178
+
179
+
180
+ def wrap_tensors(result):
181
+ from ._ndarray import ndarray
182
+
183
+ if isinstance(result, torch.Tensor):
184
+ return ndarray(result)
185
+ elif isinstance(result, (tuple, list)):
186
+ result = type(result)(wrap_tensors(x) for x in result)
187
+ return result
188
+
189
+
190
+ def array_or_scalar(values, py_type=float, return_scalar=False):
191
+ if return_scalar:
192
+ return py_type(values.item())
193
+ else:
194
+ from ._ndarray import ndarray
195
+
196
+ return ndarray(values)
197
+
198
+
199
+ # ### The main decorator to normalize arguments / postprocess the output ###
200
+
201
+
202
+ def normalizer(_func=None, *, promote_scalar_result=False):
203
+ def normalizer_inner(func):
204
+ @functools.wraps(func)
205
+ def wrapped(*args, **kwds):
206
+ sig = inspect.signature(func)
207
+ params = sig.parameters
208
+ first_param = next(iter(params.values()))
209
+
210
+ # NumPy's API does not have positional args before variadic positional args
211
+ if first_param.kind == inspect.Parameter.VAR_POSITIONAL:
212
+ args = [maybe_normalize(arg, first_param) for arg in args]
213
+ else:
214
+ # NB: extra unknown arguments: pass through, will raise in func(*args) below
215
+ args = (
216
+ tuple(
217
+ maybe_normalize(arg, parm)
218
+ for arg, parm in zip(args, params.values())
219
+ )
220
+ + args[len(params.values()) :]
221
+ )
222
+
223
+ kwds = {
224
+ name: maybe_normalize(arg, params[name]) if name in params else arg
225
+ for name, arg in kwds.items()
226
+ }
227
+
228
+ result = func(*args, **kwds)
229
+
230
+ # keepdims
231
+ bound_args = None
232
+ if "keepdims" in params and params["keepdims"].annotation == "KeepDims":
233
+ # keepdims can be in any position so we need sig.bind
234
+ bound_args = sig.bind(*args, **kwds).arguments
235
+ if bound_args.get("keepdims", False):
236
+ # In this case the first arg is the initial tensor and
237
+ # the second arg is (optionally) the axis
238
+ tensor = args[0]
239
+ axis = bound_args.get("axis")
240
+ result = _util.apply_keepdims(result, axis, tensor.ndim)
241
+
242
+ # out
243
+ if "out" in params:
244
+ # out can be in any position so we need sig.bind
245
+ if bound_args is None:
246
+ bound_args = sig.bind(*args, **kwds).arguments
247
+ out = bound_args.get("out")
248
+ result = maybe_copy_to(out, result, promote_scalar_result)
249
+ result = wrap_tensors(result)
250
+
251
+ return result
252
+
253
+ return wrapped
254
+
255
+ if _func is None:
256
+ return normalizer_inner
257
+ else:
258
+ return normalizer_inner(_func)
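To see the whole pipeline in one place, here is a hedged, self-contained sketch of an implementer function wired through `@normalizer`. The toy `scaled_sum` below is illustrative and not part of this diff; note that annotations must be the *strings* that key the `normalizers` dict, which `from __future__ import annotations` guarantees:

```python
# Hedged sketch of the @normalizer protocol defined in this file.
from __future__ import annotations  # keep annotations as plain strings

from typing import Optional

from torch._numpy._normalizations import ArrayLike, OutArray, normalizer


@normalizer
def scaled_sum(x: ArrayLike, factor=2, out: Optional[OutArray] = None):
    # `x` arrives here as a torch.Tensor; `out` is never used by the
    # implementer -- the wrapper copies the result into out.tensor and
    # preserves the `result is out` semantics described above.
    return (x * factor).sum()


res = scaled_sum([1, 2, 3])  # list -> tensor on the way in,
                             # tensor -> ndarray on the way out
```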
parrot/lib/python3.10/site-packages/torch/_numpy/_ufuncs.py ADDED
@@ -0,0 +1,334 @@
1
+ # mypy: ignore-errors
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Optional
6
+
7
+ import torch
8
+
9
+ from . import _binary_ufuncs_impl, _dtypes_impl, _unary_ufuncs_impl, _util
10
+ from ._normalizations import (
11
+ ArrayLike,
12
+ ArrayLikeOrScalar,
13
+ CastingModes,
14
+ DTypeLike,
15
+ normalizer,
16
+ NotImplementedType,
17
+ OutArray,
18
+ )
19
+
20
+
21
+ def _ufunc_postprocess(result, out, casting):
22
+ if out is not None:
23
+ result = _util.typecast_tensor(result, out.dtype.torch_dtype, casting)
24
+ result = torch.broadcast_to(result, out.shape)
25
+ return result
26
+
27
+
28
+ # ############# Binary ufuncs ######################
29
+
30
+ _binary = [
31
+ name
32
+ for name in dir(_binary_ufuncs_impl)
33
+ if not name.startswith("_") and name not in ["torch", "matmul", "divmod", "ldexp"]
34
+ ]
35
+
36
+
37
+ NEP50_FUNCS = (
38
+ "add",
39
+ "subtract",
40
+ "multiply",
41
+ "floor_divide",
42
+ "true_divide",
43
+ "divide",
44
+ "remainder",
45
+ "bitwise_and",
46
+ "bitwise_or",
47
+ "bitwise_xor",
48
+ "bitwise_left_shift",
49
+ "bitwise_right_shift",
50
+ "hypot",
51
+ "arctan2",
52
+ "logaddexp",
53
+ "logaddexp2",
54
+ "heaviside",
55
+ "copysign",
56
+ "fmax",
57
+ "minimum",
58
+ "fmin",
59
+ "maximum",
60
+ "fmod",
61
+ "gcd",
62
+ "lcm",
63
+ "pow",
64
+ )
65
+
66
+
67
+ def deco_binary_ufunc(torch_func):
68
+ """Common infra for binary ufuncs.
69
+
70
+ Normalize arguments, sort out type casting, broadcasting and delegate to
71
+ the pytorch functions for the actual work.
72
+ """
73
+
74
+ @normalizer
75
+ def wrapped(
76
+ x1: ArrayLikeOrScalar,
77
+ x2: ArrayLikeOrScalar,
78
+ /,
79
+ out: Optional[OutArray] = None,
80
+ *,
81
+ where: NotImplementedType = True,
82
+ casting: Optional[CastingModes] = "same_kind",
83
+ order: NotImplementedType = "K",
84
+ dtype: Optional[DTypeLike] = None,
85
+ subok: NotImplementedType = False,
86
+ signature: NotImplementedType = None,
87
+ extobj: NotImplementedType = None,
88
+ ):
89
+ if dtype is not None:
90
+
91
+ def cast(x, dtype):
92
+ if isinstance(x, torch.Tensor):
93
+ return _util.typecast_tensor(x, dtype, casting)
94
+ else:
95
+ return torch.as_tensor(x, dtype=dtype)
96
+
97
+ x1 = cast(x1, dtype)
98
+ x2 = cast(x2, dtype)
99
+ elif isinstance(x1, torch.Tensor) and isinstance(x2, torch.Tensor):
100
+ dtype = _dtypes_impl.result_type_impl(x1, x2)
101
+ x1, x2 = _util.typecast_tensors((x1, x2), dtype, casting)
102
+ else:
103
+ x1, x2 = _dtypes_impl.nep50_to_tensors(
104
+ x1, x2, torch_func.__name__ in NEP50_FUNCS, torch_func.__name__
105
+ )
106
+
107
+ result = torch_func(x1, x2)
108
+
109
+ return _ufunc_postprocess(result, out, casting)
110
+
111
+ wrapped.__qualname__ = torch_func.__name__
112
+ wrapped.__name__ = torch_func.__name__
113
+
114
+ return wrapped
115
+
116
+
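The scalar branch above defers to `nep50_to_tensors`, which gives the names listed in `NEP50_FUNCS` NEP 50 "weak scalar" promotion. A hedged behavioral sketch, assuming this build follows NEP 50 for those functions:

```python
# Hedged sketch: python scalars are "weak" for the NEP50_FUNCS above.
import torch._numpy as tnp

a = tnp.asarray([1, 2, 3], dtype=tnp.int8)
print(tnp.add(a, 1).dtype)    # stays int8: a python int does not upcast
print(tnp.add(a, 1.0).dtype)  # floating result: a weak float changes the kind
```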
117
+ # matmul's signature is _slightly_ different from other ufuncs:
118
+ # - no where=...
119
+ # - additional axis=..., axes=...
120
+ # - no NEP50 scalars in or out
121
+ @normalizer
122
+ def matmul(
123
+ x1: ArrayLike,
124
+ x2: ArrayLike,
125
+ /,
126
+ out: Optional[OutArray] = None,
127
+ *,
128
+ casting: Optional[CastingModes] = "same_kind",
129
+ order: NotImplementedType = "K",
130
+ dtype: Optional[DTypeLike] = None,
131
+ subok: NotImplementedType = False,
132
+ signature: NotImplementedType = None,
133
+ extobj: NotImplementedType = None,
134
+ axes: NotImplementedType = None,
135
+ axis: NotImplementedType = None,
136
+ ):
137
+ if dtype is None:
138
+ dtype = _dtypes_impl.result_type_impl(x1, x2)
139
+ x1, x2 = _util.typecast_tensors((x1, x2), dtype, casting)
140
+
141
+ result = _binary_ufuncs_impl.matmul(x1, x2)
142
+
143
+ result = _ufunc_postprocess(result, out, casting)
144
+ return result
145
+
146
+
147
+ # ldexp casting is special: the dtype of the result == dtype of the 1st arg
148
+ @normalizer
149
+ def ldexp(
150
+ x1: ArrayLikeOrScalar,
151
+ x2: ArrayLikeOrScalar,
152
+ /,
153
+ out: Optional[OutArray] = None,
154
+ *,
155
+ where: NotImplementedType = True,
156
+ casting: Optional[CastingModes] = "same_kind",
157
+ order: NotImplementedType = "K",
158
+ dtype: Optional[DTypeLike] = None,
159
+ subok: NotImplementedType = False,
160
+ signature: NotImplementedType = None,
161
+ extobj: NotImplementedType = None,
162
+ ):
163
+ if dtype is not None:
164
+ if isinstance(x1, torch.Tensor):
165
+ x1 = _util.typecast_tensor(x1, dtype, casting)
166
+ else:
167
+ x1 = torch.as_tensor(x1, dtype=dtype)
168
+ else:
169
+ if not isinstance(x1, torch.Tensor):
170
+ x1 = torch.as_tensor(x1)
171
+ x1 = _util.cast_int_to_float(x1)
172
+
173
+ x2 = torch.as_tensor(x2)
174
+ # the second arg must be integer
175
+ if _dtypes_impl._category(x2.dtype) != 1:
176
+ raise ValueError("ldexp 2nd arg must be integer")
177
+
178
+ result = _binary_ufuncs_impl.ldexp(x1, x2)
179
+
180
+ if x1.dtype == torch.float16:
181
+ # torch.ldexp(f16, int) -> f32, undo it
182
+ result = result.to(torch.float16)
183
+
184
+ return _ufunc_postprocess(result, out, casting)
185
+
186
+
187
+ # nin=2, nout=2
188
+ @normalizer
189
+ def divmod(
190
+ x1: ArrayLike,
191
+ x2: ArrayLike,
192
+ out1: Optional[OutArray] = None,
193
+ out2: Optional[OutArray] = None,
194
+ /,
195
+ out: tuple[Optional[OutArray], Optional[OutArray]] = (None, None),
196
+ *,
197
+ where: NotImplementedType = True,
198
+ casting: Optional[CastingModes] = "same_kind",
199
+ order: NotImplementedType = "K",
200
+ dtype: Optional[DTypeLike] = None,
201
+ subok: NotImplementedType = False,
202
+ signature: NotImplementedType = None,
203
+ extobj: NotImplementedType = None,
204
+ ):
205
+ # make sure we either have no out arrays at all, or there is either
206
+ # out1, out2, or out=tuple, but not both
207
+ num_outs = sum(x is not None for x in [out1, out2])
208
+ if num_outs == 1:
209
+ raise ValueError("both out1 and out2 need to be provided")
210
+ elif num_outs == 2:
211
+ o1, o2 = out
212
+ if o1 is not None or o2 is not None:
213
+ raise TypeError(
214
+ "cannot specify 'out' as both a positional and keyword argument"
215
+ )
216
+ else:
217
+ out1, out2 = out
218
+
219
+ if dtype is None:
220
+ dtype = _dtypes_impl.result_type_impl(x1, x2)
221
+ x1, x2 = _util.typecast_tensors((x1, x2), dtype, casting)
222
+
223
+ quot, rem = _binary_ufuncs_impl.divmod(x1, x2)
224
+
225
+ quot = _ufunc_postprocess(quot, out1, casting)
226
+ rem = _ufunc_postprocess(rem, out2, casting)
227
+ return quot, rem
228
+
229
+
230
+ #
231
+ # Attach ufuncs to this module, for further export to the public namespace in __init__.py
232
+ #
233
+ for name in _binary:
234
+ ufunc = getattr(_binary_ufuncs_impl, name)
235
+ vars()[name] = deco_binary_ufunc(ufunc)
236
+
237
+
238
+ def modf(x, /, *args, **kwds):
239
+ quot, rem = divmod(x, 1, *args, **kwds)
240
+ return rem, quot
241
+
242
+
243
+ _binary = _binary + ["divmod", "modf", "matmul", "ldexp"]
244
+
245
+
246
+ # ############# Unary ufuncs ######################
247
+
248
+
249
+ _unary = [
250
+ name
251
+ for name in dir(_unary_ufuncs_impl)
252
+ if not name.startswith("_") and name != "torch"
253
+ ]
254
+
255
+
256
+ # these are ufunc(int) -> float
257
+ _fp_unary = [
258
+ "arccos",
259
+ "arccosh",
260
+ "arcsin",
261
+ "arcsinh",
262
+ "arctan",
263
+ "arctanh",
264
+ "cbrt",
265
+ "cos",
266
+ "cosh",
267
+ "deg2rad",
268
+ "degrees",
269
+ "exp",
270
+ "exp2",
271
+ "expm1",
272
+ "log",
273
+ "log10",
274
+ "log1p",
275
+ "log2",
276
+ "rad2deg",
277
+ "radians",
278
+ "reciprocal",
279
+ "sin",
280
+ "sinh",
281
+ "sqrt",
282
+ "square",
283
+ "tan",
284
+ "tanh",
285
+ "trunc",
286
+ ]
287
+
288
+
289
+ def deco_unary_ufunc(torch_func):
290
+ """Common infra for unary ufuncs.
291
+
292
+ Normalize arguments, sort out type casting, broadcasting and delegate to
293
+ the pytorch functions for the actual work.
294
+ """
295
+
296
+ @normalizer
297
+ def wrapped(
298
+ x: ArrayLike,
299
+ /,
300
+ out: Optional[OutArray] = None,
301
+ *,
302
+ where=True,
303
+ casting: Optional[CastingModes] = "same_kind",
304
+ order="K",
305
+ dtype: Optional[DTypeLike] = None,
306
+ subok: NotImplementedType = False,
307
+ signature=None,
308
+ extobj=None,
309
+ ):
310
+ if dtype is not None:
311
+ x = _util.typecast_tensor(x, dtype, casting)
312
+
313
+ if torch_func.__name__ in _fp_unary:
314
+ x = _util.cast_int_to_float(x)
315
+
316
+ result = torch_func(x)
317
+ result = _ufunc_postprocess(result, out, casting)
318
+ return result
319
+
320
+ wrapped.__qualname__ = torch_func.__name__
321
+ wrapped.__name__ = torch_func.__name__
322
+
323
+ return wrapped
324
+
325
+
326
+ #
327
+ # Attach ufuncs to this module, for further export to the public namespace in __init__.py
328
+ #
329
+ for name in _unary:
330
+ ufunc = getattr(_unary_ufuncs_impl, name)
331
+ vars()[name] = deco_unary_ufunc(ufunc)
332
+
333
+
334
+ __all__ = _binary + _unary # noqa: PLE0605
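Because both `vars()` loops attach the generated wrappers at module scope, callers see plain functions. A hedged usage sketch (re-export through `torch._numpy`, as the comments above indicate, is assumed):

```python
# Hedged sketch: calling the generated ufunc wrappers.
import torch._numpy as tnp

a = tnp.asarray([1, 2, 3])
out = tnp.empty(3)
tnp.add(a, a, out=out)   # result is cast/broadcast and copied into `out`
print(out)

# modf above is built on divmod(x, 1) and returns (fractional, integral):
frac, whole = tnp.modf(tnp.asarray([1.25, 2.5]))
print(frac, whole)
```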
parrot/lib/python3.10/site-packages/torch/_numpy/fft.py ADDED
@@ -0,0 +1,130 @@
1
+ # mypy: ignore-errors
2
+
3
+ from __future__ import annotations
4
+
5
+ import functools
6
+
7
+ import torch
8
+
9
+ from . import _dtypes_impl, _util
10
+ from ._normalizations import ArrayLike, normalizer
11
+
12
+
13
+ def upcast(func):
14
+ """NumPy fft casts inputs to 64 bit and *returns 64-bit results*."""
15
+
16
+ @functools.wraps(func)
17
+ def wrapped(tensor, *args, **kwds):
18
+ target_dtype = (
19
+ _dtypes_impl.default_dtypes().complex_dtype
20
+ if tensor.is_complex()
21
+ else _dtypes_impl.default_dtypes().float_dtype
22
+ )
23
+ tensor = _util.cast_if_needed(tensor, target_dtype)
24
+ return func(tensor, *args, **kwds)
25
+
26
+ return wrapped
27
+
28
+
29
+ @normalizer
30
+ @upcast
31
+ def fft(a: ArrayLike, n=None, axis=-1, norm=None):
32
+ return torch.fft.fft(a, n, dim=axis, norm=norm)
33
+
34
+
35
+ @normalizer
36
+ @upcast
37
+ def ifft(a: ArrayLike, n=None, axis=-1, norm=None):
38
+ return torch.fft.ifft(a, n, dim=axis, norm=norm)
39
+
40
+
41
+ @normalizer
42
+ @upcast
43
+ def rfft(a: ArrayLike, n=None, axis=-1, norm=None):
44
+ return torch.fft.rfft(a, n, dim=axis, norm=norm)
45
+
46
+
47
+ @normalizer
48
+ @upcast
49
+ def irfft(a: ArrayLike, n=None, axis=-1, norm=None):
50
+ return torch.fft.irfft(a, n, dim=axis, norm=norm)
51
+
52
+
53
+ @normalizer
54
+ @upcast
55
+ def fftn(a: ArrayLike, s=None, axes=None, norm=None):
56
+ return torch.fft.fftn(a, s, dim=axes, norm=norm)
57
+
58
+
59
+ @normalizer
60
+ @upcast
61
+ def ifftn(a: ArrayLike, s=None, axes=None, norm=None):
62
+ return torch.fft.ifftn(a, s, dim=axes, norm=norm)
63
+
64
+
65
+ @normalizer
66
+ @upcast
67
+ def rfftn(a: ArrayLike, s=None, axes=None, norm=None):
68
+ return torch.fft.rfftn(a, s, dim=axes, norm=norm)
69
+
70
+
71
+ @normalizer
72
+ @upcast
73
+ def irfftn(a: ArrayLike, s=None, axes=None, norm=None):
74
+ return torch.fft.irfftn(a, s, dim=axes, norm=norm)
75
+
76
+
77
+ @normalizer
78
+ @upcast
79
+ def fft2(a: ArrayLike, s=None, axes=(-2, -1), norm=None):
80
+ return torch.fft.fft2(a, s, dim=axes, norm=norm)
81
+
82
+
83
+ @normalizer
84
+ @upcast
85
+ def ifft2(a: ArrayLike, s=None, axes=(-2, -1), norm=None):
86
+ return torch.fft.ifft2(a, s, dim=axes, norm=norm)
87
+
88
+
89
+ @normalizer
90
+ @upcast
91
+ def rfft2(a: ArrayLike, s=None, axes=(-2, -1), norm=None):
92
+ return torch.fft.rfft2(a, s, dim=axes, norm=norm)
93
+
94
+
95
+ @normalizer
96
+ @upcast
97
+ def irfft2(a: ArrayLike, s=None, axes=(-2, -1), norm=None):
98
+ return torch.fft.irfft2(a, s, dim=axes, norm=norm)
99
+
100
+
101
+ @normalizer
102
+ @upcast
103
+ def hfft(a: ArrayLike, n=None, axis=-1, norm=None):
104
+ return torch.fft.hfft(a, n, dim=axis, norm=norm)
105
+
106
+
107
+ @normalizer
108
+ @upcast
109
+ def ihfft(a: ArrayLike, n=None, axis=-1, norm=None):
110
+ return torch.fft.ihfft(a, n, dim=axis, norm=norm)
111
+
112
+
113
+ @normalizer
114
+ def fftfreq(n, d=1.0):
115
+ return torch.fft.fftfreq(n, d)
116
+
117
+
118
+ @normalizer
119
+ def rfftfreq(n, d=1.0):
120
+ return torch.fft.rfftfreq(n, d)
121
+
122
+
123
+ @normalizer
124
+ def fftshift(x: ArrayLike, axes=None):
125
+ return torch.fft.fftshift(x, axes)
126
+
127
+
128
+ @normalizer
129
+ def ifftshift(x: ArrayLike, axes=None):
130
+ return torch.fft.ifftshift(x, axes)
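A hedged sketch of the `upcast` behavior defined at the top of this file: inputs are widened to the default 64-bit (complex) dtype before the transform, mirroring NumPy's fft:

```python
# Hedged sketch: fft results come back in 64-bit precision, as in NumPy.
import torch._numpy as tnp  # assumes __init__.py re-exports the fft module

x = tnp.asarray([0.0, 1.0, 0.0, -1.0], dtype=tnp.float32)
print(tnp.fft.fft(x).dtype)   # complex128: the float32 input was upcast
print(tnp.fft.rfft(x).shape)  # one-sided spectrum: n // 2 + 1 == 3 bins
```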
parrot/lib/python3.10/site-packages/torch/_numpy/random.py ADDED
@@ -0,0 +1,191 @@
1
+ # mypy: ignore-errors
2
+
3
+ """Wrapper to mimic (parts of) np.random API surface.
4
+
5
+ NumPy has strict guarantees on reproducibility etc; here we don't give any.
6
+
7
+ NB: numpy's default dtype is float64; draws here use this module's configured default float dtype.
8
+
9
+ """
10
+ from __future__ import annotations
11
+
12
+ import functools
13
+ from math import sqrt
14
+ from typing import Optional
15
+
16
+ import torch
17
+
18
+ from . import _dtypes_impl, _util
19
+ from ._normalizations import array_or_scalar, ArrayLike, normalizer
20
+
21
+
22
+ __all__ = [
23
+ "seed",
24
+ "random_sample",
25
+ "sample",
26
+ "random",
27
+ "rand",
28
+ "randn",
29
+ "normal",
30
+ "choice",
31
+ "randint",
32
+ "shuffle",
33
+ "uniform",
34
+ ]
35
+
36
+
37
+ def use_numpy_random():
38
+ # local import to avoid ref cycles
39
+ import torch._dynamo.config as config
40
+
41
+ return config.use_numpy_random_stream
42
+
43
+
44
+ def deco_stream(func):
45
+ @functools.wraps(func)
46
+ def inner(*args, **kwds):
47
+ if not use_numpy_random():
48
+ return func(*args, **kwds)
49
+ else:
50
+ import numpy
51
+
52
+ from ._ndarray import ndarray
53
+
54
+ f = getattr(numpy.random, func.__name__)
55
+
56
+ # numpy funcs accept numpy ndarrays, unwrap
57
+ args = tuple(
58
+ arg.tensor.numpy() if isinstance(arg, ndarray) else arg for arg in args
59
+ )
60
+ kwds = {
61
+ key: val.tensor.numpy() if isinstance(val, ndarray) else val
62
+ for key, val in kwds.items()
63
+ }
64
+
65
+ value = f(*args, **kwds)
66
+
67
+ # `value` can be either numpy.ndarray or python scalar (or None)
68
+ if isinstance(value, numpy.ndarray):
69
+ value = ndarray(torch.as_tensor(value))
70
+
71
+ return value
72
+
73
+ return inner
74
+
75
+
76
+ @deco_stream
77
+ def seed(seed=None):
78
+ if seed is not None:
79
+ torch.random.manual_seed(seed)
80
+
81
+
82
+ @deco_stream
83
+ def random_sample(size=None):
84
+ if size is None:
85
+ size = ()
86
+ dtype = _dtypes_impl.default_dtypes().float_dtype
87
+ values = torch.empty(size, dtype=dtype).uniform_()
88
+ return array_or_scalar(values, return_scalar=size == ())
89
+
90
+
91
+ def rand(*size):
92
+ if size == ():
93
+ size = None
94
+ return random_sample(size)
95
+
96
+
97
+ sample = random_sample
98
+ random = random_sample
99
+
100
+
101
+ @deco_stream
102
+ def uniform(low=0.0, high=1.0, size=None):
103
+ if size is None:
104
+ size = ()
105
+ dtype = _dtypes_impl.default_dtypes().float_dtype
106
+ values = torch.empty(size, dtype=dtype).uniform_(low, high)
107
+ return array_or_scalar(values, return_scalar=size == ())
108
+
109
+
110
+ @deco_stream
111
+ def randn(*size):
112
+ dtype = _dtypes_impl.default_dtypes().float_dtype
113
+ values = torch.randn(size, dtype=dtype)
114
+ return array_or_scalar(values, return_scalar=size == ())
115
+
116
+
117
+ @deco_stream
118
+ def normal(loc=0.0, scale=1.0, size=None):
119
+ if size is None:
120
+ size = ()
121
+ dtype = _dtypes_impl.default_dtypes().float_dtype
122
+ values = torch.empty(size, dtype=dtype).normal_(loc, scale)
123
+ return array_or_scalar(values, return_scalar=size == ())
124
+
125
+
126
+ @deco_stream
127
+ def shuffle(x):
128
+ # no @normalizer because we do not cast e.g. lists to tensors
129
+ from ._ndarray import ndarray
130
+
131
+ if isinstance(x, torch.Tensor):
132
+ tensor = x
133
+ elif isinstance(x, ndarray):
134
+ tensor = x.tensor
135
+ else:
136
+ raise NotImplementedError("We do not random.shuffle lists in-place")
137
+
138
+ perm = torch.randperm(tensor.shape[0])
139
+ xp = tensor[perm]
140
+ tensor.copy_(xp)
141
+
142
+
143
+ @deco_stream
144
+ def randint(low, high=None, size=None):
145
+ if size is None:
146
+ size = ()
147
+ if not isinstance(size, (tuple, list)):
148
+ size = (size,)
149
+ if high is None:
150
+ low, high = 0, low
151
+ values = torch.randint(low, high, size=size)
152
+ return array_or_scalar(values, int, return_scalar=size == ())
153
+
154
+
155
+ @deco_stream
156
+ @normalizer
157
+ def choice(a: ArrayLike, size=None, replace=True, p: Optional[ArrayLike] = None):
158
+ # https://stackoverflow.com/questions/59461811/random-choice-with-pytorch
159
+ if a.numel() == 1:
160
+ a = torch.arange(a)
161
+
162
+ # TODO: check a.dtype is integer -- cf np.random.choice(3.4) which raises
163
+
164
+ # number of draws
165
+ if size is None:
166
+ num_el = 1
167
+ elif _util.is_sequence(size):
168
+ num_el = 1
169
+ for el in size:
170
+ num_el *= el
171
+ else:
172
+ num_el = size
173
+
174
+ # prepare the probabilities
175
+ if p is None:
176
+ p = torch.ones_like(a) / a.shape[0]
177
+
178
+ # cf https://github.com/numpy/numpy/blob/main/numpy/random/mtrand.pyx#L973
179
+ atol = sqrt(torch.finfo(p.dtype).eps)
180
+ if abs(p.sum() - 1.0) > atol:
181
+ raise ValueError("probabilities do not sum to 1.")
182
+
183
+ # actually sample
184
+ indices = torch.multinomial(p, num_el, replacement=replace)
185
+
186
+ if _util.is_sequence(size):
187
+ indices = indices.reshape(size)
188
+
189
+ samples = a[indices]
190
+
191
+ return samples
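A hedged usage sketch of the wrappers above. Draws come from torch's RNG unless `torch._dynamo.config.use_numpy_random_stream` reroutes them through NumPy (see `deco_stream`):

```python
# Hedged sketch of the random wrappers defined in this module.
import torch._numpy.random as tnr

tnr.seed(1234)
print(tnr.uniform(0.0, 10.0, size=3))  # ndarray of three draws
print(tnr.randint(0, 5))               # a python int, since size=None
print(tnr.choice(tnr.randn(10), size=2, replace=False))
```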
parrot/lib/python3.10/site-packages/torch/_numpy/testing/__init__.py ADDED
@@ -0,0 +1,19 @@
1
+ # mypy: ignore-errors
2
+
3
+ from .utils import (
4
+ _gen_alignment_data,
5
+ assert_,
6
+ assert_allclose,
7
+ assert_almost_equal,
8
+ assert_array_almost_equal,
9
+ assert_array_equal,
10
+ assert_array_less,
11
+ assert_equal,
12
+ assert_raises_regex,
13
+ assert_warns,
14
+ HAS_REFCOUNT,
15
+ IS_WASM,
16
+ suppress_warnings,
17
+ )
18
+
19
+ # from .testing import assert_allclose # FIXME
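A hedged sketch of using these re-exports; they mirror `numpy.testing` but operate on `torch._numpy` ndarrays and array_likes:

```python
# Hedged sketch: NaNs compare positionally, as documented in utils.py.
import torch._numpy as np
from torch._numpy.testing import assert_allclose, assert_array_equal

assert_array_equal(np.asarray([1.0, 2.0, np.nan]), [1.0, 2.0, np.nan])
assert_allclose([1.0, 2.0], [1.0, 2.0 + 1e-9], rtol=1e-7)
```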
parrot/lib/python3.10/site-packages/torch/_numpy/testing/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (533 Bytes).
parrot/lib/python3.10/site-packages/torch/_numpy/testing/__pycache__/utils.cpython-310.pyc ADDED
Binary file (63 kB).
parrot/lib/python3.10/site-packages/torch/_numpy/testing/utils.py ADDED
@@ -0,0 +1,2390 @@
1
+ # mypy: ignore-errors
2
+
3
+ """
4
+ Utility functions to facilitate testing.
5
+
6
+ """
7
+ import contextlib
8
+ import gc
9
+ import operator
10
+ import os
11
+ import platform
12
+ import pprint
13
+ import re
14
+ import shutil
15
+ import sys
16
+ import warnings
17
+ from functools import wraps
18
+ from io import StringIO
19
+ from tempfile import mkdtemp, mkstemp
20
+ from warnings import WarningMessage
21
+
22
+ import torch._numpy as np
23
+ from torch._numpy import arange, asarray as asanyarray, empty, float32, intp, ndarray
24
+
25
+ __all__ = [
26
+ "assert_equal",
27
+ "assert_almost_equal",
28
+ "assert_approx_equal",
29
+ "assert_array_equal",
30
+ "assert_array_less",
31
+ "assert_string_equal",
32
+ "assert_",
33
+ "assert_array_almost_equal",
34
+ "build_err_msg",
35
+ "decorate_methods",
36
+ "print_assert_equal",
37
+ "verbose",
38
+ "assert_",
39
+ "assert_array_almost_equal_nulp",
40
+ "assert_raises_regex",
41
+ "assert_array_max_ulp",
42
+ "assert_warns",
43
+ "assert_no_warnings",
44
+ "assert_allclose",
45
+ "IgnoreException",
46
+ "clear_and_catch_warnings",
47
+ "temppath",
48
+ "tempdir",
49
+ "IS_PYPY",
50
+ "HAS_REFCOUNT",
51
+ "IS_WASM",
52
+ "suppress_warnings",
53
+ "assert_array_compare",
54
+ "assert_no_gc_cycles",
55
+ "break_cycles",
56
+ "IS_PYSTON",
57
+ ]
58
+
59
+
60
+ verbose = 0
61
+
62
+ IS_WASM = platform.machine() in ["wasm32", "wasm64"]
63
+ IS_PYPY = sys.implementation.name == "pypy"
64
+ IS_PYSTON = hasattr(sys, "pyston_version_info")
65
+ HAS_REFCOUNT = getattr(sys, "getrefcount", None) is not None and not IS_PYSTON
66
+
67
+
68
+ def assert_(val, msg=""):
69
+ """
70
+ Assert that works in release mode.
71
+ Accepts callable msg to allow deferring evaluation until failure.
72
+
73
+ The Python built-in ``assert`` does not work when executing code in
74
+ optimized mode (the ``-O`` flag) - no byte-code is generated for it.
75
+
76
+ For documentation on usage, refer to the Python documentation.
77
+
78
+ """
79
+ __tracebackhide__ = True # Hide traceback for py.test
80
+ if not val:
81
+ try:
82
+ smsg = msg()
83
+ except TypeError:
84
+ smsg = msg
85
+ raise AssertionError(smsg)
86
+
87
+
88
+ def gisnan(x):
89
+ return np.isnan(x)
90
+
91
+
92
+ def gisfinite(x):
93
+ return np.isfinite(x)
94
+
95
+
96
+ def gisinf(x):
97
+ return np.isinf(x)
98
+
99
+
100
+ def build_err_msg(
101
+ arrays,
102
+ err_msg,
103
+ header="Items are not equal:",
104
+ verbose=True,
105
+ names=("ACTUAL", "DESIRED"),
106
+ precision=8,
107
+ ):
108
+ msg = ["\n" + header]
109
+ if err_msg:
110
+ if err_msg.find("\n") == -1 and len(err_msg) < 79 - len(header):
111
+ msg = [msg[0] + " " + err_msg]
112
+ else:
113
+ msg.append(err_msg)
114
+ if verbose:
115
+ for i, a in enumerate(arrays):
116
+ if isinstance(a, ndarray):
117
+ # precision argument is only needed if the objects are ndarrays
118
+ # r_func = partial(array_repr, precision=precision)
119
+ r_func = ndarray.__repr__
120
+ else:
121
+ r_func = repr
122
+
123
+ try:
124
+ r = r_func(a)
125
+ except Exception as exc:
126
+ r = f"[repr failed for <{type(a).__name__}>: {exc}]"
127
+ if r.count("\n") > 3:
128
+ r = "\n".join(r.splitlines()[:3])
129
+ r += "..."
130
+ msg.append(f" {names[i]}: {r}")
131
+ return "\n".join(msg)
132
+
133
+
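A hedged sketch of the message `build_err_msg` produces; every assert below funnels its failure text through it:

```python
# Hedged sketch: build_err_msg formats the standard two-object failure text.
from torch._numpy.testing.utils import build_err_msg

msg = build_err_msg([[1, 2], [1, 3]], err_msg="custom note")
print(msg)
# (approximate output)
# Items are not equal: custom note
#  ACTUAL: [1, 2]
#  DESIRED: [1, 3]
```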
134
+ def assert_equal(actual, desired, err_msg="", verbose=True):
135
+ """
136
+ Raises an AssertionError if two objects are not equal.
137
+
138
+ Given two objects (scalars, lists, tuples, dictionaries or numpy arrays),
139
+ check that all elements of these objects are equal. An exception is raised
140
+ at the first conflicting value.
141
+
142
+ When one of `actual` and `desired` is a scalar and the other is array_like,
143
+ the function checks that each element of the array_like object is equal to
144
+ the scalar.
145
+
146
+ This function handles NaN comparisons as if NaN was a "normal" number.
147
+ That is, AssertionError is not raised if both objects have NaNs in the same
148
+ positions. This is in contrast to the IEEE standard on NaNs, which says
149
+ that NaN compared to anything must return False.
150
+
151
+ Parameters
152
+ ----------
153
+ actual : array_like
154
+ The object to check.
155
+ desired : array_like
156
+ The expected object.
157
+ err_msg : str, optional
158
+ The error message to be printed in case of failure.
159
+ verbose : bool, optional
160
+ If True, the conflicting values are appended to the error message.
161
+
162
+ Raises
163
+ ------
164
+ AssertionError
165
+ If actual and desired are not equal.
166
+
167
+ Examples
168
+ --------
169
+ >>> np.testing.assert_equal([4,5], [4,6])
170
+ Traceback (most recent call last):
171
+ ...
172
+ AssertionError:
173
+ Items are not equal:
174
+ item=1
175
+ ACTUAL: 5
176
+ DESIRED: 6
177
+
178
+ The following comparison does not raise an exception. There are NaNs
179
+ in the inputs, but they are in the same positions.
180
+
181
+ >>> np.testing.assert_equal(np.array([1.0, 2.0, np.nan]), [1, 2, np.nan])
182
+
183
+ """
184
+ __tracebackhide__ = True # Hide traceback for py.test
185
+
186
+ num_nones = sum([actual is None, desired is None])
187
+ if num_nones == 1:
188
+ raise AssertionError(f"Not equal: {actual} != {desired}")
189
+ elif num_nones == 2:
190
+ return True
191
+ # else, carry on
192
+
193
+ if isinstance(actual, np.DType) or isinstance(desired, np.DType):
194
+ result = actual == desired
195
+ if not result:
196
+ raise AssertionError(f"Not equal: {actual} != {desired}")
197
+ else:
198
+ return True
199
+
200
+ if isinstance(desired, str) and isinstance(actual, str):
201
+ assert actual == desired
202
+ return
203
+
204
+ if isinstance(desired, dict):
205
+ if not isinstance(actual, dict):
206
+ raise AssertionError(repr(type(actual)))
207
+ assert_equal(len(actual), len(desired), err_msg, verbose)
208
+ for k in desired.keys():
209
+ if k not in actual:
210
+ raise AssertionError(repr(k))
211
+ assert_equal(actual[k], desired[k], f"key={k!r}\n{err_msg}", verbose)
212
+ return
213
+ if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)):
214
+ assert_equal(len(actual), len(desired), err_msg, verbose)
215
+ for k in range(len(desired)):
216
+ assert_equal(actual[k], desired[k], f"item={k!r}\n{err_msg}", verbose)
217
+ return
218
+
219
+ from torch._numpy import imag, iscomplexobj, isscalar, ndarray, real, signbit
220
+
221
+ if isinstance(actual, ndarray) or isinstance(desired, ndarray):
222
+ return assert_array_equal(actual, desired, err_msg, verbose)
223
+ msg = build_err_msg([actual, desired], err_msg, verbose=verbose)
224
+
225
+ # Handle complex numbers: separate into real/imag to handle
226
+ # nan/inf/negative zero correctly
227
+ # XXX: catch ValueError for subclasses of ndarray where iscomplex fail
228
+ try:
229
+ usecomplex = iscomplexobj(actual) or iscomplexobj(desired)
230
+ except (ValueError, TypeError):
231
+ usecomplex = False
232
+
233
+ if usecomplex:
234
+ if iscomplexobj(actual):
235
+ actualr = real(actual)
236
+ actuali = imag(actual)
237
+ else:
238
+ actualr = actual
239
+ actuali = 0
240
+ if iscomplexobj(desired):
241
+ desiredr = real(desired)
242
+ desiredi = imag(desired)
243
+ else:
244
+ desiredr = desired
245
+ desiredi = 0
246
+ try:
247
+ assert_equal(actualr, desiredr)
248
+ assert_equal(actuali, desiredi)
249
+ except AssertionError:
250
+ raise AssertionError(msg) # noqa: B904
251
+
252
+ # isscalar test to check cases such as [np.nan] != np.nan
253
+ if isscalar(desired) != isscalar(actual):
254
+ raise AssertionError(msg)
255
+
256
+ # Inf/nan/negative zero handling
257
+ try:
258
+ isdesnan = gisnan(desired)
259
+ isactnan = gisnan(actual)
260
+ if isdesnan and isactnan:
261
+ return # both nan, so equal
262
+
263
+ # handle signed zero specially for floats
264
+ array_actual = np.asarray(actual)
265
+ array_desired = np.asarray(desired)
266
+
267
+ if desired == 0 and actual == 0:
268
+ if not signbit(desired) == signbit(actual):
269
+ raise AssertionError(msg)
270
+
271
+ except (TypeError, ValueError, NotImplementedError):
272
+ pass
273
+
274
+ try:
275
+ # Explicitly use __eq__ for comparison, gh-2552
276
+ if not (desired == actual):
277
+ raise AssertionError(msg)
278
+
279
+ except (DeprecationWarning, FutureWarning) as e:
280
+ # this handles the case when the two types are not even comparable
281
+ if "elementwise == comparison" in e.args[0]:
282
+ raise AssertionError(msg) # noqa: B904
283
+ else:
284
+ raise
285
+
286
+
287
+ def print_assert_equal(test_string, actual, desired):
288
+ """
289
+ Test if two objects are equal, and print an error message if test fails.
290
+
291
+ The test is performed with ``actual == desired``.
292
+
293
+ Parameters
294
+ ----------
295
+ test_string : str
296
+ The message supplied to AssertionError.
297
+ actual : object
298
+ The object to test for equality against `desired`.
299
+ desired : object
300
+ The expected result.
301
+
302
+ Examples
303
+ --------
304
+ >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1]) # doctest: +SKIP
305
+ >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 2]) # doctest: +SKIP
306
+ Traceback (most recent call last):
307
+ ...
308
+ AssertionError: Test XYZ of func xyz failed
309
+ ACTUAL:
310
+ [0, 1]
311
+ DESIRED:
312
+ [0, 2]
313
+
314
+ """
315
+ __tracebackhide__ = True # Hide traceback for py.test
316
+ import pprint
317
+
318
+ if not (actual == desired):
319
+ msg = StringIO()
320
+ msg.write(test_string)
321
+ msg.write(" failed\nACTUAL: \n")
322
+ pprint.pprint(actual, msg)
323
+ msg.write("DESIRED: \n")
324
+ pprint.pprint(desired, msg)
325
+ raise AssertionError(msg.getvalue())
326
+
327
+
328
+ def assert_almost_equal(actual, desired, decimal=7, err_msg="", verbose=True):
329
+ """
330
+ Raises an AssertionError if two items are not equal up to desired
331
+ precision.
332
+
333
+ .. note:: It is recommended to use one of `assert_allclose`,
334
+ `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
335
+ instead of this function for more consistent floating point
336
+ comparisons.
337
+
338
+ The test verifies that the elements of `actual` and `desired` satisfy:
339
+
340
+ ``abs(desired-actual) < float64(1.5 * 10**(-decimal))``
341
+
342
+ That is a looser test than originally documented, but agrees with what the
343
+ actual implementation in `assert_array_almost_equal` did up to rounding
344
+ vagaries. An exception is raised at conflicting values. For ndarrays this
345
+ delegates to assert_array_almost_equal
346
+
347
+ Parameters
348
+ ----------
349
+ actual : array_like
350
+ The object to check.
351
+ desired : array_like
352
+ The expected object.
353
+ decimal : int, optional
354
+ Desired precision, default is 7.
355
+ err_msg : str, optional
356
+ The error message to be printed in case of failure.
357
+ verbose : bool, optional
358
+ If True, the conflicting values are appended to the error message.
359
+
360
+ Raises
361
+ ------
362
+ AssertionError
363
+ If actual and desired are not equal up to specified precision.
364
+
365
+ See Also
366
+ --------
367
+ assert_allclose: Compare two array_like objects for equality with desired
368
+ relative and/or absolute precision.
369
+ assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
370
+
371
+ Examples
372
+ --------
373
+ >>> from torch._numpy.testing import assert_almost_equal
374
+ >>> assert_almost_equal(2.3333333333333, 2.33333334)
375
+ >>> assert_almost_equal(2.3333333333333, 2.33333334, decimal=10)
376
+ Traceback (most recent call last):
377
+ ...
378
+ AssertionError:
379
+ Arrays are not almost equal to 10 decimals
380
+ ACTUAL: 2.3333333333333
381
+ DESIRED: 2.33333334
382
+
383
+ >>> assert_almost_equal(np.array([1.0,2.3333333333333]),
384
+ ... np.array([1.0,2.33333334]), decimal=9)
385
+ Traceback (most recent call last):
386
+ ...
387
+ AssertionError:
388
+ Arrays are not almost equal to 9 decimals
389
+ <BLANKLINE>
390
+ Mismatched elements: 1 / 2 (50%)
391
+ Max absolute difference: 6.666699636781459e-09
392
+ Max relative difference: 2.8571569790287484e-09
393
+ x: torch.ndarray([1.0000, 2.3333], dtype=float64)
394
+ y: torch.ndarray([1.0000, 2.3333], dtype=float64)
395
+
396
+ """
397
+ __tracebackhide__ = True # Hide traceback for py.test
398
+ from torch._numpy import imag, iscomplexobj, ndarray, real
399
+
400
+ # Handle complex numbers: separate into real/imag to handle
401
+ # nan/inf/negative zero correctly
402
+ # XXX: catch ValueError for subclasses of ndarray where iscomplex fail
403
+ try:
404
+ usecomplex = iscomplexobj(actual) or iscomplexobj(desired)
405
+ except ValueError:
406
+ usecomplex = False
407
+
408
+ def _build_err_msg():
409
+ header = "Arrays are not almost equal to %d decimals" % decimal
410
+ return build_err_msg([actual, desired], err_msg, verbose=verbose, header=header)
411
+
412
+ if usecomplex:
413
+ if iscomplexobj(actual):
414
+ actualr = real(actual)
415
+ actuali = imag(actual)
416
+ else:
417
+ actualr = actual
418
+ actuali = 0
419
+ if iscomplexobj(desired):
420
+ desiredr = real(desired)
421
+ desiredi = imag(desired)
422
+ else:
423
+ desiredr = desired
424
+ desiredi = 0
425
+ try:
426
+ assert_almost_equal(actualr, desiredr, decimal=decimal)
427
+ assert_almost_equal(actuali, desiredi, decimal=decimal)
428
+ except AssertionError:
429
+ raise AssertionError(_build_err_msg()) # noqa: B904
430
+
431
+ if isinstance(actual, (ndarray, tuple, list)) or isinstance(
432
+ desired, (ndarray, tuple, list)
433
+ ):
434
+ return assert_array_almost_equal(actual, desired, decimal, err_msg)
435
+ try:
436
+ # If one of desired/actual is not finite, handle it specially here:
437
+ # check that both are nan if any is a nan, and test for equality
438
+ # otherwise
439
+ if not (gisfinite(desired) and gisfinite(actual)):
440
+ if gisnan(desired) or gisnan(actual):
441
+ if not (gisnan(desired) and gisnan(actual)):
442
+ raise AssertionError(_build_err_msg())
443
+ else:
444
+ if not desired == actual:
445
+ raise AssertionError(_build_err_msg())
446
+ return
447
+ except (NotImplementedError, TypeError):
448
+ pass
449
+ if abs(desired - actual) >= np.float64(1.5 * 10.0 ** (-decimal)):
450
+ raise AssertionError(_build_err_msg())
451
+
452
+
453
+ def assert_approx_equal(actual, desired, significant=7, err_msg="", verbose=True):
454
+ """
455
+ Raises an AssertionError if two items are not equal up to significant
456
+ digits.
457
+
458
+ .. note:: It is recommended to use one of `assert_allclose`,
459
+ `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
460
+ instead of this function for more consistent floating point
461
+ comparisons.
462
+
463
+ Given two numbers, check that they are approximately equal.
464
+ Approximately equal is defined as the number of significant digits
465
+ that agree.
466
+
467
+ Parameters
468
+ ----------
469
+ actual : scalar
470
+ The object to check.
471
+ desired : scalar
472
+ The expected object.
473
+ significant : int, optional
474
+ Desired precision, default is 7.
475
+ err_msg : str, optional
476
+ The error message to be printed in case of failure.
477
+ verbose : bool, optional
478
+ If True, the conflicting values are appended to the error message.
479
+
480
+ Raises
481
+ ------
482
+ AssertionError
483
+ If actual and desired are not equal up to specified precision.
484
+
485
+ See Also
486
+ --------
487
+ assert_allclose: Compare two array_like objects for equality with desired
488
+ relative and/or absolute precision.
489
+ assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
490
+
491
+ Examples
492
+ --------
493
+ >>> np.testing.assert_approx_equal(0.12345677777777e-20, 0.1234567e-20) # doctest: +SKIP
494
+ >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345671e-20, # doctest: +SKIP
495
+ ... significant=8)
496
+ >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345672e-20, # doctest: +SKIP
497
+ ... significant=8)
498
+ Traceback (most recent call last):
499
+ ...
500
+ AssertionError:
501
+ Items are not equal to 8 significant digits:
502
+ ACTUAL: 1.234567e-21
503
+ DESIRED: 1.2345672e-21
504
+
505
+ the evaluated condition that raises the exception is
506
+
507
+ >>> abs(0.12345670e-20/1e-21 - 0.12345672e-20/1e-21) >= 10**-(8-1)
508
+ True
509
+
510
+ """
511
+ __tracebackhide__ = True # Hide traceback for py.test
512
+ import numpy as np
513
+
514
+ (actual, desired) = map(float, (actual, desired))
515
+ if desired == actual:
516
+ return
517
+ # Normalize the numbers to be in range (-10.0, 10.0)
518
+ # scale = float(pow(10,math.floor(math.log10(0.5*(abs(desired)+abs(actual))))))
519
+ scale = 0.5 * (np.abs(desired) + np.abs(actual))
520
+ scale = np.power(10, np.floor(np.log10(scale)))
521
+ try:
522
+ sc_desired = desired / scale
523
+ except ZeroDivisionError:
524
+ sc_desired = 0.0
525
+ try:
526
+ sc_actual = actual / scale
527
+ except ZeroDivisionError:
528
+ sc_actual = 0.0
529
+ msg = build_err_msg(
530
+ [actual, desired],
531
+ err_msg,
532
+ header="Items are not equal to %d significant digits:" % significant,
533
+ verbose=verbose,
534
+ )
535
+ try:
536
+ # If one of desired/actual is not finite, handle it specially here:
537
+ # check that both are nan if any is a nan, and test for equality
538
+ # otherwise
539
+ if not (gisfinite(desired) and gisfinite(actual)):
540
+ if gisnan(desired) or gisnan(actual):
541
+ if not (gisnan(desired) and gisnan(actual)):
542
+ raise AssertionError(msg)
543
+ else:
544
+ if not desired == actual:
545
+ raise AssertionError(msg)
546
+ return
547
+ except (TypeError, NotImplementedError):
548
+ pass
549
+ if np.abs(sc_desired - sc_actual) >= np.power(10.0, -(significant - 1)):
550
+ raise AssertionError(msg)
551
+
552
+
553
+ def assert_array_compare(
554
+ comparison,
555
+ x,
556
+ y,
557
+ err_msg="",
558
+ verbose=True,
559
+ header="",
560
+ precision=6,
561
+ equal_nan=True,
562
+ equal_inf=True,
563
+ *,
564
+ strict=False,
565
+ ):
566
+ __tracebackhide__ = True # Hide traceback for py.test
567
+ from torch._numpy import all, array, asarray, bool_, inf, isnan, max
568
+
569
+ x = asarray(x)
570
+ y = asarray(y)
571
+
572
+ def array2string(a):
573
+ return str(a)
574
+
575
+ # original array for output formatting
576
+ ox, oy = x, y
577
+
578
+ def func_assert_same_pos(x, y, func=isnan, hasval="nan"):
579
+ """Handling nan/inf.
580
+
581
+ Combine results of running func on x and y, checking that they are True
582
+ at the same locations.
583
+
584
+ """
585
+ __tracebackhide__ = True # Hide traceback for py.test
586
+ x_id = func(x)
587
+ y_id = func(y)
588
+ # We include work-arounds here to handle three types of slightly
589
+ # pathological ndarray subclasses:
590
+ # (1) all() on `masked` array scalars can return masked arrays, so we
591
+ # use != True
592
+ # (2) __eq__ on some ndarray subclasses returns Python booleans
593
+ # instead of element-wise comparisons, so we cast to bool_() and
594
+ # use isinstance(..., bool) checks
595
+ # (3) subclasses with bare-bones __array_function__ implementations may
596
+ # not implement np.all(), so favor using the .all() method
597
+ # We are not committed to supporting such subclasses, but it's nice to
598
+ # support them if possible.
599
+ if (x_id == y_id).all().item() is not True:
600
+ msg = build_err_msg(
601
+ [x, y],
602
+ err_msg + f"\nx and y {hasval} location mismatch:",
603
+ verbose=verbose,
604
+ header=header,
605
+ names=("x", "y"),
606
+ precision=precision,
607
+ )
608
+ raise AssertionError(msg)
609
+ # If there is a scalar, then here we know the array has the same
610
+ # flag as it everywhere, so we should return the scalar flag.
611
+ if isinstance(x_id, bool) or x_id.ndim == 0:
612
+ return bool_(x_id)
613
+ elif isinstance(y_id, bool) or y_id.ndim == 0:
614
+ return bool_(y_id)
615
+ else:
616
+ return y_id
617
+
618
+ try:
619
+ if strict:
620
+ cond = x.shape == y.shape and x.dtype == y.dtype
621
+ else:
622
+ cond = (x.shape == () or y.shape == ()) or x.shape == y.shape
623
+ if not cond:
624
+ if x.shape != y.shape:
625
+ reason = f"\n(shapes {x.shape}, {y.shape} mismatch)"
626
+ else:
627
+ reason = f"\n(dtypes {x.dtype}, {y.dtype} mismatch)"
628
+ msg = build_err_msg(
629
+ [x, y],
630
+ err_msg + reason,
631
+ verbose=verbose,
632
+ header=header,
633
+ names=("x", "y"),
634
+ precision=precision,
635
+ )
636
+ raise AssertionError(msg)
637
+
638
+ flagged = bool_(False)
639
+
640
+ if equal_nan:
641
+ flagged = func_assert_same_pos(x, y, func=isnan, hasval="nan")
642
+
643
+ if equal_inf:
644
+ flagged |= func_assert_same_pos(
645
+ x, y, func=lambda xy: xy == +inf, hasval="+inf"
646
+ )
647
+ flagged |= func_assert_same_pos(
648
+ x, y, func=lambda xy: xy == -inf, hasval="-inf"
649
+ )
650
+
651
+ if flagged.ndim > 0:
652
+ x, y = x[~flagged], y[~flagged]
653
+ # Only do the comparison if actual values are left
654
+ if x.size == 0:
655
+ return
656
+ elif flagged:
657
+ # no sense doing comparison if everything is flagged.
658
+ return
659
+
660
+ val = comparison(x, y)
661
+
662
+ if isinstance(val, bool):
663
+ cond = val
664
+ reduced = array([val])
665
+ else:
666
+ reduced = val.ravel()
667
+ cond = reduced.all()
668
+
669
+ # The below comparison is a hack to ensure that fully masked
670
+ # results, for which val.ravel().all() returns np.ma.masked,
671
+ # do not trigger a failure (np.ma.masked != True evaluates as
672
+ # np.ma.masked, which is falsy).
673
+ if not cond:
674
+ n_mismatch = reduced.size - int(reduced.sum(dtype=intp))
675
+ n_elements = flagged.size if flagged.ndim != 0 else reduced.size
676
+ percent_mismatch = 100 * n_mismatch / n_elements
677
+ remarks = [
678
+ f"Mismatched elements: {n_mismatch} / {n_elements} ({percent_mismatch:.3g}%)"
679
+ ]
680
+
681
+ # with errstate(all='ignore'):
682
+ # ignore errors for non-numeric types
683
+ with contextlib.suppress(TypeError, RuntimeError):
684
+ error = abs(x - y)
685
+ if np.issubdtype(x.dtype, np.unsignedinteger):
686
+ error2 = abs(y - x)
687
+ np.minimum(error, error2, out=error)
688
+ max_abs_error = max(error)
689
+ remarks.append(
690
+ "Max absolute difference: " + array2string(max_abs_error.item())
691
+ )
692
+
693
+ # note: this definition of relative error matches that one
694
+ # used by assert_allclose (found in np.isclose)
695
+ # Filter values where the divisor would be zero
696
+ nonzero = bool_(y != 0)
697
+ if all(~nonzero):
698
+ max_rel_error = array(inf)
699
+ else:
700
+ max_rel_error = max(error[nonzero] / abs(y[nonzero]))
701
+ remarks.append(
702
+ "Max relative difference: " + array2string(max_rel_error.item())
703
+ )
704
+
705
+ err_msg += "\n" + "\n".join(remarks)
706
+ msg = build_err_msg(
707
+ [ox, oy],
708
+ err_msg,
709
+ verbose=verbose,
710
+ header=header,
711
+ names=("x", "y"),
712
+ precision=precision,
713
+ )
714
+ raise AssertionError(msg)
715
+ except ValueError:
716
+ import traceback
717
+
718
+ efmt = traceback.format_exc()
719
+ header = f"error during assertion:\n\n{efmt}\n\n{header}"
720
+
721
+ msg = build_err_msg(
722
+ [x, y],
723
+ err_msg,
724
+ verbose=verbose,
725
+ header=header,
726
+ names=("x", "y"),
727
+ precision=precision,
728
+ )
729
+ raise ValueError(msg) # noqa: B904
730
+
731
+
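`assert_array_compare` above is the engine behind `assert_array_equal`, `assert_array_less` and the almost-equal variants below; any elementwise predicate can be plugged in. A hedged sketch (the `torch._numpy.testing.utils` import path matches this diff's layout):

```python
# Hedged sketch: plugging a custom elementwise predicate into the engine.
import operator
from torch._numpy.testing.utils import assert_array_compare

assert_array_compare(operator.__le__, [1, 2, 3], [1, 2, 4],
                     header="Arrays are not ordered x <= y")
# passes; it would raise AssertionError (with that header) on any x > y
```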
732
+ def assert_array_equal(x, y, err_msg="", verbose=True, *, strict=False):
733
+ """
734
+ Raises an AssertionError if two array_like objects are not equal.
735
+
736
+ Given two array_like objects, check that the shape is equal and all
737
+ elements of these objects are equal (but see the Notes for the special
738
+ handling of a scalar). An exception is raised at shape mismatch or
739
+ conflicting values. In contrast to the standard usage in numpy, NaNs
740
+ are compared like numbers, no assertion is raised if both objects have
741
+ NaNs in the same positions.
742
+
743
+ The usual caution for verifying equality with floating point numbers is
744
+ advised.
745
+
746
+ Parameters
747
+ ----------
748
+ x : array_like
749
+ The actual object to check.
750
+ y : array_like
751
+ The desired, expected object.
752
+ err_msg : str, optional
753
+ The error message to be printed in case of failure.
754
+ verbose : bool, optional
755
+ If True, the conflicting values are appended to the error message.
756
+ strict : bool, optional
757
+ If True, raise an AssertionError when either the shape or the data
758
+ type of the array_like objects does not match. The special
759
+ handling for scalars mentioned in the Notes section is disabled.
760
+
761
+ Raises
762
+ ------
763
+ AssertionError
764
+ If actual and desired objects are not equal.
765
+
766
+ See Also
767
+ --------
768
+ assert_allclose: Compare two array_like objects for equality with desired
769
+ relative and/or absolute precision.
770
+ assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
771
+
772
+ Notes
773
+ -----
774
+ When one of `x` and `y` is a scalar and the other is array_like, the
775
+ function checks that each element of the array_like object is equal to
776
+ the scalar. This behaviour can be disabled with the `strict` parameter.
777
+
778
+ Examples
779
+ --------
780
+ The first assert does not raise an exception:
781
+
782
+ >>> np.testing.assert_array_equal([1.0,2.33333,np.nan],
783
+ ... [np.exp(0),2.33333, np.nan])
784
+
785
+ Use `assert_allclose` or one of the nulp (number of floating point values)
786
+ functions for these cases instead:
787
+
788
+ >>> np.testing.assert_allclose([1.0,np.pi,np.nan],
789
+ ... [1, np.sqrt(np.pi)**2, np.nan],
790
+ ... rtol=1e-10, atol=0)
791
+
792
+ As mentioned in the Notes section, `assert_array_equal` has special
793
+ handling for scalars. Here the test checks that each value in `x` is 3:
794
+
795
+ >>> x = np.full((2, 5), fill_value=3)
796
+ >>> np.testing.assert_array_equal(x, 3)
797
+
798
+ Use `strict` to raise an AssertionError when comparing a scalar with an
799
+ array:
800
+
801
+ >>> np.testing.assert_array_equal(x, 3, strict=True)
802
+ Traceback (most recent call last):
803
+ ...
804
+ AssertionError:
805
+ Arrays are not equal
806
+ <BLANKLINE>
807
+ (shapes (2, 5), () mismatch)
808
+ x: torch.ndarray([[3, 3, 3, 3, 3],
809
+ [3, 3, 3, 3, 3]])
810
+ y: torch.ndarray(3)
811
+
812
+ The `strict` parameter also ensures that the array data types match:
813
+
814
+ >>> x = np.array([2, 2, 2])
815
+ >>> y = np.array([2., 2., 2.], dtype=np.float32)
816
+ >>> np.testing.assert_array_equal(x, y, strict=True)
817
+ Traceback (most recent call last):
818
+ ...
819
+ AssertionError:
820
+ Arrays are not equal
821
+ <BLANKLINE>
822
+ (dtypes dtype("int64"), dtype("float32") mismatch)
823
+ x: torch.ndarray([2, 2, 2])
824
+ y: torch.ndarray([2., 2., 2.])
825
+ """
826
+ __tracebackhide__ = True # Hide traceback for py.test
827
+ assert_array_compare(
828
+ operator.__eq__,
829
+ x,
830
+ y,
831
+ err_msg=err_msg,
832
+ verbose=verbose,
833
+ header="Arrays are not equal",
834
+ strict=strict,
835
+ )
836
+
837
+
838
+ def assert_array_almost_equal(x, y, decimal=6, err_msg="", verbose=True):
839
+ """
840
+ Raises an AssertionError if two objects are not equal up to desired
841
+ precision.
842
+
843
+ .. note:: It is recommended to use one of `assert_allclose`,
844
+ `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
845
+ instead of this function for more consistent floating point
846
+ comparisons.
847
+
848
+ The test verifies identical shapes and that the elements of ``actual`` and
849
+ ``desired`` satisfy
850
+
851
+ ``abs(desired-actual) < 1.5 * 10**(-decimal)``
852
+
853
+ That is a looser test than originally documented, but agrees with what the
854
+ actual implementation did up to rounding vagaries. An exception is raised
855
+ at shape mismatch or conflicting values. In contrast to the standard usage
856
+ in numpy, NaNs are compared like numbers; no assertion is raised if both
857
+ objects have NaNs in the same positions.
858
+
859
+ Parameters
860
+ ----------
861
+ x : array_like
862
+ The actual object to check.
863
+ y : array_like
864
+ The desired, expected object.
865
+ decimal : int, optional
866
+ Desired precision, default is 6.
867
+ err_msg : str, optional
868
+ The error message to be printed in case of failure.
869
+ verbose : bool, optional
870
+ If True, the conflicting values are appended to the error message.
871
+
872
+ Raises
873
+ ------
874
+ AssertionError
875
+ If actual and desired are not equal up to specified precision.
876
+
877
+ See Also
878
+ --------
879
+ assert_allclose: Compare two array_like objects for equality with desired
880
+ relative and/or absolute precision.
881
+ assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
882
+
883
+ Examples
884
+ --------
885
+ The first assert does not raise an exception:
886
+
887
+ >>> np.testing.assert_array_almost_equal([1.0,2.333,np.nan],
888
+ ... [1.0,2.333,np.nan])
889
+
890
+ >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
891
+ ... [1.0,2.33339,np.nan], decimal=5)
892
+ Traceback (most recent call last):
893
+ ...
894
+ AssertionError:
895
+ Arrays are not almost equal to 5 decimals
896
+ <BLANKLINE>
897
+ Mismatched elements: 1 / 3 (33.3%)
898
+ Max absolute difference: 5.999999999994898e-05
899
+ Max relative difference: 2.5713661239633743e-05
900
+ x: torch.ndarray([1.0000, 2.3333, nan], dtype=float64)
901
+ y: torch.ndarray([1.0000, 2.3334, nan], dtype=float64)
902
+
903
+ >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
904
+ ... [1.0,2.33333, 5], decimal=5)
905
+ Traceback (most recent call last):
906
+ ...
907
+ AssertionError:
908
+ Arrays are not almost equal to 5 decimals
909
+ <BLANKLINE>
910
+ x and y nan location mismatch:
911
+ x: torch.ndarray([1.0000, 2.3333, nan], dtype=float64)
912
+ y: torch.ndarray([1.0000, 2.3333, 5.0000], dtype=float64)
913
+
914
+ """
915
+ __tracebackhide__ = True # Hide traceback for py.test
916
+ from torch._numpy import any as npany, float_, issubdtype, number, result_type
917
+
918
+ def compare(x, y):
919
+ try:
920
+ if npany(gisinf(x)) or npany(gisinf(y)):
921
+ xinfid = gisinf(x)
922
+ yinfid = gisinf(y)
923
+ if not (xinfid == yinfid).all():
924
+ return False
925
+ # if there is only one item, x and y are both +-inf; compare them directly
926
+ if x.size == y.size == 1:
927
+ return x == y
928
+ x = x[~xinfid]
929
+ y = y[~yinfid]
930
+ except (TypeError, NotImplementedError):
931
+ pass
932
+
933
+ # make sure y is an inexact type to avoid abs(MIN_INT); will cause
934
+ # casting of x later.
935
+ dtype = result_type(y, 1.0)
936
+ y = asanyarray(y, dtype)
937
+ z = abs(x - y)
938
+
939
+ if not issubdtype(z.dtype, number):
940
+ z = z.astype(float_) # handle object arrays
941
+
942
+ return z < 1.5 * 10.0 ** (-decimal)
943
+
944
+ assert_array_compare(
945
+ compare,
946
+ x,
947
+ y,
948
+ err_msg=err_msg,
949
+ verbose=verbose,
950
+ header=("Arrays are not almost equal to %d decimals" % decimal),
951
+ precision=decimal,
952
+ )
953
+
954
+
955
+ def assert_array_less(x, y, err_msg="", verbose=True):
956
+ """
957
+ Raises an AssertionError if two array_like objects are not ordered by less
958
+ than.
959
+
960
+ Given two array_like objects, check that the shape is equal and all
961
+ elements of the first object are strictly smaller than those of the
962
+ second object. An exception is raised at shape mismatch or incorrectly
963
+ ordered values. Shape mismatch does not raise if an object has zero
964
+ dimension. In contrast to the standard usage in numpy, NaNs are
965
+ compared; no assertion is raised if both objects have NaNs in the same
966
+ positions.
967
+
970
+ Parameters
971
+ ----------
972
+ x : array_like
973
+ The smaller object to check.
974
+ y : array_like
975
+ The larger object to compare.
976
+ err_msg : string
977
+ The error message to be printed in case of failure.
978
+ verbose : bool
979
+ If True, the conflicting values are appended to the error message.
980
+
981
+ Raises
982
+ ------
983
+ AssertionError
984
+ If actual and desired objects are not equal.
985
+
986
+ See Also
987
+ --------
988
+ assert_array_equal: tests objects for equality
989
+ assert_array_almost_equal: test objects for equality up to precision
990
+
993
+ Examples
994
+ --------
995
+ >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1.1, 2.0, np.nan])
996
+ >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1, 2.0, np.nan])
997
+ Traceback (most recent call last):
998
+ ...
999
+ AssertionError:
1000
+ Arrays are not less-ordered
1001
+ <BLANKLINE>
1002
+ Mismatched elements: 1 / 3 (33.3%)
1003
+ Max absolute difference: 1.0
1004
+ Max relative difference: 0.5
1005
+ x: torch.ndarray([1., 1., nan], dtype=float64)
1006
+ y: torch.ndarray([1., 2., nan], dtype=float64)
1007
+
1008
+ >>> np.testing.assert_array_less([1.0, 4.0], 3)
1009
+ Traceback (most recent call last):
1010
+ ...
1011
+ AssertionError:
1012
+ Arrays are not less-ordered
1013
+ <BLANKLINE>
1014
+ Mismatched elements: 1 / 2 (50%)
1015
+ Max absolute difference: 2.0
1016
+ Max relative difference: 0.6666666666666666
1017
+ x: torch.ndarray([1., 4.], dtype=float64)
1018
+ y: torch.ndarray(3)
1019
+
1020
+ >>> np.testing.assert_array_less([1.0, 2.0, 3.0], [4])
1021
+ Traceback (most recent call last):
1022
+ ...
1023
+ AssertionError:
1024
+ Arrays are not less-ordered
1025
+ <BLANKLINE>
1026
+ (shapes (3,), (1,) mismatch)
1027
+ x: torch.ndarray([1., 2., 3.], dtype=float64)
1028
+ y: torch.ndarray([4])
1029
+
1030
+ """
1031
+ __tracebackhide__ = True # Hide traceback for py.test
1032
+ assert_array_compare(
1033
+ operator.__lt__,
1034
+ x,
1035
+ y,
1036
+ err_msg=err_msg,
1037
+ verbose=verbose,
1038
+ header="Arrays are not less-ordered",
1039
+ equal_inf=False,
1040
+ )
1041
+
1042
+
1043
+ def assert_string_equal(actual, desired):
1044
+ """
1045
+ Test if two strings are equal.
1046
+
1047
+ If the given strings are equal, `assert_string_equal` does nothing.
1048
+ If they are not equal, an AssertionError is raised, and the diff
1049
+ between the strings is shown.
1050
+
1051
+ Parameters
1052
+ ----------
1053
+ actual : str
1054
+ The string to test for equality against the expected string.
1055
+ desired : str
1056
+ The expected string.
1057
+
1058
+ Examples
1059
+ --------
1060
+ >>> np.testing.assert_string_equal('abc', 'abc') # doctest: +SKIP
1061
+ >>> np.testing.assert_string_equal('abc', 'abcd') # doctest: +SKIP
1062
+ Traceback (most recent call last):
1063
+ File "<stdin>", line 1, in <module>
1064
+ ...
1065
+ AssertionError: Differences in strings:
1066
+ - abc+ abcd? +
1067
+
1068
+ """
1069
+ # delay import of difflib to reduce startup time
1070
+ __tracebackhide__ = True # Hide traceback for py.test
1071
+ import difflib
1072
+
1073
+ if not isinstance(actual, str):
1074
+ raise AssertionError(repr(type(actual)))
1075
+ if not isinstance(desired, str):
1076
+ raise AssertionError(repr(type(desired)))
1077
+ if desired == actual:
1078
+ return
1079
+
1080
+ diff = list(
1081
+ difflib.Differ().compare(actual.splitlines(True), desired.splitlines(True))
1082
+ )
1083
+ diff_list = []
1084
+ while diff:
1085
+ d1 = diff.pop(0)
1086
+ if d1.startswith(" "):
1087
+ continue
1088
+ if d1.startswith("- "):
1089
+ l = [d1]
1090
+ d2 = diff.pop(0)
1091
+ if d2.startswith("? "):
1092
+ l.append(d2)
1093
+ d2 = diff.pop(0)
1094
+ if not d2.startswith("+ "):
1095
+ raise AssertionError(repr(d2))
1096
+ l.append(d2)
1097
+ if diff:
1098
+ d3 = diff.pop(0)
1099
+ if d3.startswith("? "):
1100
+ l.append(d3)
1101
+ else:
1102
+ diff.insert(0, d3)
1103
+ if d2[2:] == d1[2:]:
1104
+ continue
1105
+ diff_list.extend(l)
1106
+ continue
1107
+ raise AssertionError(repr(d1))
1108
+ if not diff_list:
1109
+ return
1110
+ msg = f"Differences in strings:\n{''.join(diff_list).rstrip()}"
1111
+ if actual != desired:
1112
+ raise AssertionError(msg)
1113
+
1114
+
1115
+ import unittest
1116
+
1117
+
1118
+ class _Dummy(unittest.TestCase):
1119
+ def nop(self):
1120
+ pass
1121
+
1122
+
1123
+ _d = _Dummy("nop")
1124
+
1125
+
1126
+ def assert_raises_regex(exception_class, expected_regexp, *args, **kwargs):
1127
+ """
1128
+ assert_raises_regex(exception_class, expected_regexp, callable, *args,
1129
+ **kwargs)
1130
+ assert_raises_regex(exception_class, expected_regexp)
1131
+
1132
+ Fail unless an exception of class exception_class and with message that
1133
+ matches expected_regexp is thrown by callable when invoked with arguments
1134
+ args and keyword arguments kwargs.
1135
+
1136
+ Alternatively, can be used as a context manager like `assert_raises`.
1137
+
1138
+ Notes
1139
+ -----
1140
+ .. versionadded:: 1.9.0
1141
+
1142
+ """
1143
+ __tracebackhide__ = True # Hide traceback for py.test
1144
+ return _d.assertRaisesRegex(exception_class, expected_regexp, *args, **kwargs)
1145
+
1146
+
1147
+ def decorate_methods(cls, decorator, testmatch=None):
1148
+ """
1149
+ Apply a decorator to all methods in a class matching a regular expression.
1150
+
1151
+ The given decorator is applied to all public methods of `cls` that are
1152
+ matched by the regular expression `testmatch`
1153
+ (``testmatch.search(methodname)``). Methods that are private, i.e. start
1154
+ with an underscore, are ignored.
1155
+
1156
+ Parameters
1157
+ ----------
1158
+ cls : class
1159
+ Class whose methods to decorate.
1160
+ decorator : function
1161
+ Decorator to apply to methods
1162
+ testmatch : compiled regexp or str, optional
1163
+ The regular expression. Default value is None, in which case the
1164
+ nose default (``re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)``)
1165
+ is used.
1166
+ If `testmatch` is a string, it is compiled to a regular expression
1167
+ first.
1168
+
1169
+ """
1170
+ if testmatch is None:
1171
+ testmatch = re.compile(rf"(?:^|[\\b_\\.{os.sep}-])[Tt]est")
1172
+ else:
1173
+ testmatch = re.compile(testmatch)
1174
+ cls_attr = cls.__dict__
1175
+
1176
+ # delayed import to reduce startup time
1177
+ from inspect import isfunction
1178
+
1179
+ methods = [_m for _m in cls_attr.values() if isfunction(_m)]
1180
+ for function in methods:
1181
+ try:
1182
+ if hasattr(function, "compat_func_name"):
1183
+ funcname = function.compat_func_name
1184
+ else:
1185
+ funcname = function.__name__
1186
+ except AttributeError:
1187
+ # not a function
1188
+ continue
1189
+ if testmatch.search(funcname) and not funcname.startswith("_"):
1190
+ setattr(cls, funcname, decorator(function))
1191
+ return
1192
+
1193
+
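+ # Illustrative sketch (editor's example, not part of the upstream file):
+ # decorate_methods can bulk-apply unittest.skip to every test_* method of a
+ # TestCase subclass; the class name `MyTests` is hypothetical.
+ #
+ # import unittest
+ #
+ # class MyTests(unittest.TestCase):
+ #     def test_todo(self):
+ #         raise RuntimeError("not ready yet")
+ #
+ # decorate_methods(MyTests, unittest.skip("WIP"), testmatch=r"test_")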
1194
+ def _assert_valid_refcount(op):
1195
+ """
1196
+ Check that ufuncs don't mishandle refcount of object `1`.
1197
+ Used in a few regression tests.
1198
+ """
1199
+ if not HAS_REFCOUNT:
1200
+ return True
1201
+
1202
+ import gc
1203
+
1204
+ import numpy as np
1205
+
1206
+ b = np.arange(100 * 100).reshape(100, 100)
1207
+ c = b
1208
+ i = 1
1209
+
1210
+ gc.disable()
1211
+ try:
1212
+ rc = sys.getrefcount(i)
1213
+ for j in range(15):
1214
+ d = op(b, c)
1215
+ assert_(sys.getrefcount(i) >= rc)
1216
+ finally:
1217
+ gc.enable()
1218
+ del d # for pyflakes
1219
+
1220
+
1221
+ def assert_allclose(
1222
+ actual,
1223
+ desired,
1224
+ rtol=1e-7,
1225
+ atol=0,
1226
+ equal_nan=True,
1227
+ err_msg="",
1228
+ verbose=True,
1229
+ check_dtype=False,
1230
+ ):
1231
+ """
1232
+ Raises an AssertionError if two objects are not equal up to desired
1233
+ tolerance.
1234
+
1235
+ Given two array_like objects, check that their shapes and all elements
1236
+ are equal (but see the Notes for the special handling of a scalar). An
1237
+ exception is raised if the shapes mismatch or any values conflict. In
1238
+ contrast to the standard usage in numpy, NaNs are compared like numbers;
1239
+ no assertion is raised if both objects have NaNs in the same positions.
1240
+
1241
+ The test is equivalent to ``allclose(actual, desired, rtol, atol)`` (note
1242
+ that ``allclose`` has different default values). It compares the difference
1243
+ between `actual` and `desired` to ``atol + rtol * abs(desired)``.
1244
+
1245
+ .. versionadded:: 1.5.0
1246
+
1247
+ Parameters
1248
+ ----------
1249
+ actual : array_like
1250
+ Array obtained.
1251
+ desired : array_like
1252
+ Array desired.
1253
+ rtol : float, optional
1254
+ Relative tolerance.
1255
+ atol : float, optional
1256
+ Absolute tolerance.
1257
+ equal_nan : bool, optional
1258
+ If True, NaNs will compare equal.
1259
+ err_msg : str, optional
1260
+ The error message to be printed in case of failure.
1261
+ verbose : bool, optional
1262
+ If True, the conflicting values are appended to the error message.
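+ check_dtype : bool, optional
+ If True, additionally assert that `actual` and `desired` have the same
+ dtype (an extension of this port; not a numpy parameter).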
1263
+
1264
+ Raises
1265
+ ------
1266
+ AssertionError
1267
+ If actual and desired are not equal up to specified precision.
1268
+
1269
+ See Also
1270
+ --------
1271
+ assert_array_almost_equal_nulp, assert_array_max_ulp
1272
+
1273
+ Notes
1274
+ -----
1275
+ When one of `actual` and `desired` is a scalar and the other is
1276
+ array_like, the function checks that each element of the array_like
1277
+ object is equal to the scalar.
1278
+
1279
+ Examples
1280
+ --------
1281
+ >>> x = [1e-5, 1e-3, 1e-1]
1282
+ >>> y = np.arccos(np.cos(x))
1283
+ >>> np.testing.assert_allclose(x, y, rtol=1e-5, atol=0)
1284
+
1285
+ """
1286
+ __tracebackhide__ = True # Hide traceback for py.test
1287
+
1288
+ def compare(x, y):
1289
+ return np.isclose(x, y, rtol=rtol, atol=atol, equal_nan=equal_nan)
1290
+
1291
+ actual, desired = asanyarray(actual), asanyarray(desired)
1292
+ header = f"Not equal to tolerance rtol={rtol:g}, atol={atol:g}"
1293
+
1294
+ if check_dtype:
1295
+ assert actual.dtype == desired.dtype
1296
+
1297
+ assert_array_compare(
1298
+ compare,
1299
+ actual,
1300
+ desired,
1301
+ err_msg=str(err_msg),
1302
+ verbose=verbose,
1303
+ header=header,
1304
+ equal_nan=equal_nan,
1305
+ )
1306
+
1307
+
1308
+ def assert_array_almost_equal_nulp(x, y, nulp=1):
1309
+ """
1310
+ Compare two arrays relatively to their spacing.
1311
+
1312
+ This is a relatively robust method to compare two arrays whose amplitude
1313
+ is variable.
1314
+
1315
+ Parameters
1316
+ ----------
1317
+ x, y : array_like
1318
+ Input arrays.
1319
+ nulp : int, optional
1320
+ The maximum number of unit in the last place for tolerance (see Notes).
1321
+ Default is 1.
1322
+
1323
+ Returns
1324
+ -------
1325
+ None
1326
+
1327
+ Raises
1328
+ ------
1329
+ AssertionError
1330
+ If the spacing between `x` and `y` for one or more elements is larger
1331
+ than `nulp`.
1332
+
1333
+ See Also
1334
+ --------
1335
+ assert_array_max_ulp : Check that all items of arrays differ in at most
1336
+ N Units in the Last Place.
1337
+ spacing : Return the distance between x and the nearest adjacent number.
1338
+
1339
+ Notes
1340
+ -----
1341
+ An assertion is raised if the following condition is not met::
1342
+
1343
+ abs(x - y) <= nulp * spacing(maximum(abs(x), abs(y)))
1344
+
1345
+ Examples
1346
+ --------
1347
+ >>> x = np.array([1., 1e-10, 1e-20])
1348
+ >>> eps = np.finfo(x.dtype).eps
1349
+ >>> np.testing.assert_array_almost_equal_nulp(x, x*eps/2 + x) # doctest: +SKIP
1350
+
1351
+ >>> np.testing.assert_array_almost_equal_nulp(x, x*eps + x) # doctest: +SKIP
1352
+ Traceback (most recent call last):
1353
+ ...
1354
+ AssertionError: X and Y are not equal to 1 ULP (max is 2)
1355
+
1356
+ """
1357
+ __tracebackhide__ = True # Hide traceback for py.test
1358
+ import numpy as np
1359
+
1360
+ ax = np.abs(x)
1361
+ ay = np.abs(y)
1362
+ ref = nulp * np.spacing(np.where(ax > ay, ax, ay))
1363
+ if not np.all(np.abs(x - y) <= ref):
1364
+ if np.iscomplexobj(x) or np.iscomplexobj(y):
1365
+ msg = "X and Y are not equal to %d ULP" % nulp
1366
+ else:
1367
+ max_nulp = np.max(nulp_diff(x, y))
1368
+ msg = "X and Y are not equal to %d ULP (max is %g)" % (nulp, max_nulp)
1369
+ raise AssertionError(msg)
1370
+
1371
+
1372
+ def assert_array_max_ulp(a, b, maxulp=1, dtype=None):
1373
+ """
1374
+ Check that all items of arrays differ in at most N Units in the Last Place.
1375
+
1376
+ Parameters
1377
+ ----------
1378
+ a, b : array_like
1379
+ Input arrays to be compared.
1380
+ maxulp : int, optional
1381
+ The maximum number of units in the last place that elements of `a` and
1382
+ `b` can differ. Default is 1.
1383
+ dtype : dtype, optional
1384
+ Data-type to convert `a` and `b` to if given. Default is None.
1385
+
1386
+ Returns
1387
+ -------
1388
+ ret : ndarray
1389
+ Array containing number of representable floating point numbers between
1390
+ items in `a` and `b`.
1391
+
1392
+ Raises
1393
+ ------
1394
+ AssertionError
1395
+ If one or more elements differ by more than `maxulp`.
1396
+
1397
+ Notes
1398
+ -----
1399
+ For computing the ULP difference, this API does not differentiate between
1400
+ various representations of NAN (ULP difference between 0x7fc00000 and 0xffc00000
1401
+ is zero).
1402
+
1403
+ See Also
1404
+ --------
1405
+ assert_array_almost_equal_nulp : Compare two arrays relatively to their
1406
+ spacing.
1407
+
1408
+ Examples
1409
+ --------
1410
+ >>> a = np.linspace(0., 1., 100)
1411
+ >>> res = np.testing.assert_array_max_ulp(a, np.arcsin(np.sin(a))) # doctest: +SKIP
1412
+
1413
+ """
1414
+ __tracebackhide__ = True # Hide traceback for py.test
1415
+ import numpy as np
1416
+
1417
+ ret = nulp_diff(a, b, dtype)
1418
+ if not np.all(ret <= maxulp):
1419
+ raise AssertionError(
1420
+ f"Arrays are not almost equal up to {maxulp:g} "
1421
+ f"ULP (max difference is {np.max(ret):g} ULP)"
1422
+ )
1423
+ return ret
1424
+
1425
+
1426
+ def nulp_diff(x, y, dtype=None):
1427
+ """For each item in x and y, return the number of representable floating
1428
+ points between them.
1429
+
1430
+ Parameters
1431
+ ----------
1432
+ x : array_like
1433
+ first input array
1434
+ y : array_like
1435
+ second input array
1436
+ dtype : dtype, optional
1437
+ Data-type to convert `x` and `y` to if given. Default is None.
1438
+
1439
+ Returns
1440
+ -------
1441
+ nulp : array_like
1442
+ number of representable floating point numbers between each item in x
1443
+ and y.
1444
+
1445
+ Notes
1446
+ -----
1447
+ For computing the ULP difference, this API does not differentiate between
1448
+ various representations of NAN (ULP difference between 0x7fc00000 and 0xffc00000
1449
+ is zero).
1450
+
1451
+ Examples
1452
+ --------
1453
+ # By definition, epsilon is the smallest number such that 1 + eps != 1, so
1454
+ # there should be exactly one ULP between 1 and 1 + eps
1455
+ >>> nulp_diff(1, 1 + np.finfo(x.dtype).eps) # doctest: +SKIP
1456
+ 1.0
1457
+ """
1458
+ import numpy as np
1459
+
1460
+ if dtype:
1461
+ x = np.asarray(x, dtype=dtype)
1462
+ y = np.asarray(y, dtype=dtype)
1463
+ else:
1464
+ x = np.asarray(x)
1465
+ y = np.asarray(y)
1466
+
1467
+ t = np.common_type(x, y)
1468
+ if np.iscomplexobj(x) or np.iscomplexobj(y):
1469
+ raise NotImplementedError("_nulp not implemented for complex array")
1470
+
1471
+ x = np.array([x], dtype=t)
1472
+ y = np.array([y], dtype=t)
1473
+
1474
+ x[np.isnan(x)] = np.nan
1475
+ y[np.isnan(y)] = np.nan
1476
+
1477
+ if not x.shape == y.shape:
1478
+ raise ValueError(f"x and y do not have the same shape: {x.shape} - {y.shape}")
1479
+
1480
+ def _diff(rx, ry, vdt):
1481
+ diff = np.asarray(rx - ry, dtype=vdt)
1482
+ return np.abs(diff)
1483
+
1484
+ rx = integer_repr(x)
1485
+ ry = integer_repr(y)
1486
+ return _diff(rx, ry, t)
1487
+
1488
+
1489
+ def _integer_repr(x, vdt, comp):
1490
+ # Reinterpret binary representation of the float as sign-magnitude:
1491
+ # take into account two-complement representation
1492
+ # See also
1493
+ # https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
1494
+ rx = x.view(vdt)
1495
+ if not (rx.size == 1):
1496
+ rx[rx < 0] = comp - rx[rx < 0]
1497
+ else:
1498
+ if rx < 0:
1499
+ rx = comp - rx
1500
+
1501
+ return rx
1502
+
1503
+
1504
+ def integer_repr(x):
1505
+ """Return the signed-magnitude interpretation of the binary representation
1506
+ of x."""
1507
+ import numpy as np
1508
+
1509
+ if x.dtype == np.float16:
1510
+ return _integer_repr(x, np.int16, np.int16(-(2**15)))
1511
+ elif x.dtype == np.float32:
1512
+ return _integer_repr(x, np.int32, np.int32(-(2**31)))
1513
+ elif x.dtype == np.float64:
1514
+ return _integer_repr(x, np.int64, np.int64(-(2**63)))
1515
+ else:
1516
+ raise ValueError(f"Unsupported dtype {x.dtype}")
1517
+
1518
+
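+ # Illustrative sketch (editor's example, not part of the upstream file): the
+ # sign-magnitude reinterpretation above turns the ULP distance between two
+ # floats into a plain integer subtraction; `_demo_ulp_between` is hypothetical.
+ #
+ # import numpy as np
+ #
+ # def _demo_ulp_between(a, b):
+ #     ra = np.array(a, dtype=np.float64).view(np.int64)
+ #     rb = np.array(b, dtype=np.float64).view(np.int64)
+ #     comp = np.int64(-(2**63))
+ #     ra = comp - ra if ra < 0 else ra  # fold negatives onto a monotonic scale
+ #     rb = comp - rb if rb < 0 else rb
+ #     return abs(int(ra) - int(rb))
+ #
+ # assert _demo_ulp_between(1.0, np.nextafter(1.0, 2.0)) == 1  # adjacent floats
+ # assert _demo_ulp_between(-0.0, 0.0) == 0                    # signed zeros coincide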
1519
+ @contextlib.contextmanager
1520
+ def _assert_warns_context(warning_class, name=None):
1521
+ __tracebackhide__ = True # Hide traceback for py.test
1522
+ with suppress_warnings() as sup:
1523
+ l = sup.record(warning_class)
1524
+ yield
1525
+ if not len(l) > 0:
1526
+ name_str = f" when calling {name}" if name is not None else ""
1527
+ raise AssertionError("No warning raised" + name_str)
1528
+
1529
+
1530
+ def assert_warns(warning_class, *args, **kwargs):
1531
+ """
1532
+ Fail unless the given callable throws the specified warning.
1533
+
1534
+ A warning of class warning_class should be thrown by the callable when
1535
+ invoked with arguments args and keyword arguments kwargs.
1536
+ If a different type of warning is thrown, it will not be caught.
1537
+
1538
+ If called with all arguments other than the warning class omitted, may be
1539
+ used as a context manager:
1540
+
1541
+ with assert_warns(SomeWarning):
1542
+ do_something()
1543
+
1544
+ The ability to be used as a context manager is new in NumPy v1.11.0.
1545
+
1546
+ .. versionadded:: 1.4.0
1547
+
1548
+ Parameters
1549
+ ----------
1550
+ warning_class : class
1551
+ The class defining the warning that `func` is expected to throw.
1552
+ func : callable, optional
1553
+ Callable to test
1554
+ *args : Arguments
1555
+ Arguments for `func`.
1556
+ **kwargs : Kwargs
1557
+ Keyword arguments for `func`.
1558
+
1559
+ Returns
1560
+ -------
1561
+ The value returned by `func`.
1562
+
1563
+ Examples
1564
+ --------
1565
+ >>> import warnings
1566
+ >>> def deprecated_func(num):
1567
+ ... warnings.warn("Please upgrade", DeprecationWarning)
1568
+ ... return num*num
1569
+ >>> with np.testing.assert_warns(DeprecationWarning):
1570
+ ... assert deprecated_func(4) == 16
1571
+ >>> # or passing a func
1572
+ >>> ret = np.testing.assert_warns(DeprecationWarning, deprecated_func, 4)
1573
+ >>> assert ret == 16
1574
+ """
1575
+ if not args:
1576
+ return _assert_warns_context(warning_class)
1577
+
1578
+ func = args[0]
1579
+ args = args[1:]
1580
+ with _assert_warns_context(warning_class, name=func.__name__):
1581
+ return func(*args, **kwargs)
1582
+
1583
+
1584
+ @contextlib.contextmanager
1585
+ def _assert_no_warnings_context(name=None):
1586
+ __tracebackhide__ = True # Hide traceback for py.test
1587
+ with warnings.catch_warnings(record=True) as l:
1588
+ warnings.simplefilter("always")
1589
+ yield
1590
+ if len(l) > 0:
1591
+ name_str = f" when calling {name}" if name is not None else ""
1592
+ raise AssertionError(f"Got warnings{name_str}: {l}")
1593
+
1594
+
1595
+ def assert_no_warnings(*args, **kwargs):
1596
+ """
1597
+ Fail if the given callable produces any warnings.
1598
+
1599
+ If called with all arguments omitted, may be used as a context manager:
1600
+
1601
+ with assert_no_warnings():
1602
+ do_something()
1603
+
1604
+ The ability to be used as a context manager is new in NumPy v1.11.0.
1605
+
1606
+ .. versionadded:: 1.7.0
1607
+
1608
+ Parameters
1609
+ ----------
1610
+ func : callable
1611
+ The callable to test.
1612
+ \\*args : Arguments
1613
+ Arguments passed to `func`.
1614
+ \\*\\*kwargs : Kwargs
1615
+ Keyword arguments passed to `func`.
1616
+
1617
+ Returns
1618
+ -------
1619
+ The value returned by `func`.
1620
+
1621
+ """
1622
+ if not args:
1623
+ return _assert_no_warnings_context()
1624
+
1625
+ func = args[0]
1626
+ args = args[1:]
1627
+ with _assert_no_warnings_context(name=func.__name__):
1628
+ return func(*args, **kwargs)
1629
+
1630
+
1631
+ def _gen_alignment_data(dtype=float32, type="binary", max_size=24):
1632
+ """
1633
+ generator producing data with different alignment and offsets
1634
+ to test simd vectorization
1635
+
1636
+ Parameters
1637
+ ----------
1638
+ dtype : dtype
1639
+ data type to produce
1640
+ type : string
1641
+ 'unary': create data for unary operations, creates one input
1642
+ and output array
1643
+ 'binary': create data for binary operations, creates two input
1644
+ and output array
1645
+ max_size : integer
1646
+ maximum size of data to produce
1647
+
1648
+ Returns
1649
+ -------
1650
+ if type is 'unary' yields one output array, one input array and a message
1651
+ containing information on the data
1652
+ if type is 'binary' yields one output array, two input arrays and a message
1653
+ containing information on the data
1654
+
1655
+ """
1656
+ ufmt = "unary offset=(%d, %d), size=%d, dtype=%r, %s"
1657
+ bfmt = "binary offset=(%d, %d, %d), size=%d, dtype=%r, %s"
1658
+ for o in range(3):
1659
+ for s in range(o + 2, max(o + 3, max_size)):
1660
+ if type == "unary":
1661
+
1662
+ def inp():
1663
+ return arange(s, dtype=dtype)[o:]
1664
+
1665
+ out = empty((s,), dtype=dtype)[o:]
1666
+ yield out, inp(), ufmt % (o, o, s, dtype, "out of place")
1667
+ d = inp()
1668
+ yield d, d, ufmt % (o, o, s, dtype, "in place")
1669
+ yield out[1:], inp()[:-1], ufmt % (
1670
+ o + 1,
1671
+ o,
1672
+ s - 1,
1673
+ dtype,
1674
+ "out of place",
1675
+ )
1676
+ yield out[:-1], inp()[1:], ufmt % (
1677
+ o,
1678
+ o + 1,
1679
+ s - 1,
1680
+ dtype,
1681
+ "out of place",
1682
+ )
1683
+ yield inp()[:-1], inp()[1:], ufmt % (o, o + 1, s - 1, dtype, "aliased")
1684
+ yield inp()[1:], inp()[:-1], ufmt % (o + 1, o, s - 1, dtype, "aliased")
1685
+ if type == "binary":
1686
+
1687
+ def inp1():
1688
+ return arange(s, dtype=dtype)[o:]
1689
+
1690
+ inp2 = inp1
1691
+ out = empty((s,), dtype=dtype)[o:]
1692
+ yield out, inp1(), inp2(), bfmt % (o, o, o, s, dtype, "out of place")
1693
+ d = inp1()
1694
+ yield d, d, inp2(), bfmt % (o, o, o, s, dtype, "in place1")
1695
+ d = inp2()
1696
+ yield d, inp1(), d, bfmt % (o, o, o, s, dtype, "in place2")
1697
+ yield out[1:], inp1()[:-1], inp2()[:-1], bfmt % (
1698
+ o + 1,
1699
+ o,
1700
+ o,
1701
+ s - 1,
1702
+ dtype,
1703
+ "out of place",
1704
+ )
1705
+ yield out[:-1], inp1()[1:], inp2()[:-1], bfmt % (
1706
+ o,
1707
+ o + 1,
1708
+ o,
1709
+ s - 1,
1710
+ dtype,
1711
+ "out of place",
1712
+ )
1713
+ yield out[:-1], inp1()[:-1], inp2()[1:], bfmt % (
1714
+ o,
1715
+ o,
1716
+ o + 1,
1717
+ s - 1,
1718
+ dtype,
1719
+ "out of place",
1720
+ )
1721
+ yield inp1()[1:], inp1()[:-1], inp2()[:-1], bfmt % (
1722
+ o + 1,
1723
+ o,
1724
+ o,
1725
+ s - 1,
1726
+ dtype,
1727
+ "aliased",
1728
+ )
1729
+ yield inp1()[:-1], inp1()[1:], inp2()[:-1], bfmt % (
1730
+ o,
1731
+ o + 1,
1732
+ o,
1733
+ s - 1,
1734
+ dtype,
1735
+ "aliased",
1736
+ )
1737
+ yield inp1()[:-1], inp1()[:-1], inp2()[1:], bfmt % (
1738
+ o,
1739
+ o,
1740
+ o + 1,
1741
+ s - 1,
1742
+ dtype,
1743
+ "aliased",
1744
+ )
1745
+
1746
+
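+ # Illustrative sketch (editor's example, not part of the upstream file): fuzz
+ # an elementwise binary op over the offset/aliased views yielded above. The
+ # expected result is snapshotted first because some yields alias `out` with an
+ # input (assumes NumPy-like copy/indexing semantics on these arrays).
+ #
+ # for out, a, b, msg in _gen_alignment_data(type="binary", max_size=8):
+ #     expected = a.copy() + b.copy()
+ #     out[...] = a + b
+ #     assert_array_equal(out, expected, err_msg=msg)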
1747
+ class IgnoreException(Exception):
1748
+ "Ignoring this exception due to disabled feature"
1749
+
1750
+
1751
+ @contextlib.contextmanager
1752
+ def tempdir(*args, **kwargs):
1753
+ """Context manager to provide a temporary test folder.
1754
+
1755
+ All arguments are passed as is to the underlying tempfile.mkdtemp
1756
+ function.
1757
+
1758
+ """
1759
+ tmpdir = mkdtemp(*args, **kwargs)
1760
+ try:
1761
+ yield tmpdir
1762
+ finally:
1763
+ shutil.rmtree(tmpdir)
1764
+
1765
+
1766
+ @contextlib.contextmanager
1767
+ def temppath(*args, **kwargs):
1768
+ """Context manager for temporary files.
1769
+
1770
+ Context manager that returns the path to a closed temporary file. Its
1771
+ parameters are the same as for tempfile.mkstemp and are passed directly
1772
+ to that function. The underlying file is removed when the context is
1773
+ exited, so it should be closed at that time.
1774
+
1775
+ Windows does not allow a temporary file to be opened if it is already
1776
+ open, so the underlying file must be closed after opening before it
1777
+ can be opened again.
1778
+
1779
+ """
1780
+ fd, path = mkstemp(*args, **kwargs)
1781
+ os.close(fd)
1782
+ try:
1783
+ yield path
1784
+ finally:
1785
+ os.remove(path)
1786
+
1787
+
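+ # Illustrative sketch (editor's example, not part of the upstream file): both
+ # context managers clean up what they created once the block exits.
+ #
+ # import os
+ #
+ # with tempdir(prefix="np_testing_") as d:
+ #     assert os.path.isdir(d)
+ # assert not os.path.exists(d)      # tree removed on exit
+ #
+ # with temppath(suffix=".bin") as p:
+ #     with open(p, "wb") as f:      # mkstemp's fd is already closed; reopen freely
+ #         f.write(b"\x00")
+ # assert not os.path.exists(p)      # file removed on exit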
1788
+ class clear_and_catch_warnings(warnings.catch_warnings):
1789
+ """Context manager that resets warning registry for catching warnings
1790
+
1791
+ Warnings can be slippery, because, whenever a warning is triggered, Python
1792
+ adds a ``__warningregistry__`` member to the *calling* module. This makes
1793
+ it impossible to retrigger the warning in this module, whatever you put in
1794
+ the warnings filters. This context manager accepts a sequence of `modules`
1795
+ as a keyword argument to its constructor and:
1796
+
1797
+ * stores and removes any ``__warningregistry__`` entries in given `modules`
1798
+ on entry;
1799
+ * resets ``__warningregistry__`` to its previous state on exit.
1800
+
1801
+ This makes it possible to trigger any warning afresh inside the context
1802
+ manager without disturbing the state of warnings outside.
1803
+
1804
+ For compatibility with Python 3.0, please consider all arguments to be
1805
+ keyword-only.
1806
+
1807
+ Parameters
1808
+ ----------
1809
+ record : bool, optional
1810
+ Specifies whether warnings should be captured by a custom
1811
+ implementation of ``warnings.showwarning()`` and be appended to a list
1812
+ returned by the context manager. Otherwise None is returned by the
1813
+ context manager. The objects appended to the list are arguments whose
1814
+ attributes mirror the arguments to ``showwarning()``.
1815
+ modules : sequence, optional
1816
+ Sequence of modules for which to reset warnings registry on entry and
1817
+ restore on exit. To work correctly, all 'ignore' filters should
1818
+ filter by one of these modules.
1819
+
1820
+ Examples
1821
+ --------
1822
+ >>> import warnings
1823
+ >>> with np.testing.clear_and_catch_warnings( # doctest: +SKIP
1824
+ ... modules=[np.core.fromnumeric]):
1825
+ ... warnings.simplefilter('always')
1826
+ ... warnings.filterwarnings('ignore', module='np.core.fromnumeric')
1827
+ ... # do something that raises a warning but ignore those in
1828
+ ... # np.core.fromnumeric
1829
+ """
1830
+
1831
+ class_modules = ()
1832
+
1833
+ def __init__(self, record=False, modules=()):
1834
+ self.modules = set(modules).union(self.class_modules)
1835
+ self._warnreg_copies = {}
1836
+ super().__init__(record=record)
1837
+
1838
+ def __enter__(self):
1839
+ for mod in self.modules:
1840
+ if hasattr(mod, "__warningregistry__"):
1841
+ mod_reg = mod.__warningregistry__
1842
+ self._warnreg_copies[mod] = mod_reg.copy()
1843
+ mod_reg.clear()
1844
+ return super().__enter__()
1845
+
1846
+ def __exit__(self, *exc_info):
1847
+ super().__exit__(*exc_info)
1848
+ for mod in self.modules:
1849
+ if hasattr(mod, "__warningregistry__"):
1850
+ mod.__warningregistry__.clear()
1851
+ if mod in self._warnreg_copies:
1852
+ mod.__warningregistry__.update(self._warnreg_copies[mod])
1853
+
1854
+
1855
+ class suppress_warnings:
1856
+ """
1857
+ Context manager and decorator doing much the same as
1858
+ ``warnings.catch_warnings``.
1859
+
1860
+ However, it also provides a filter mechanism to work around
1861
+ https://bugs.python.org/issue4180.
1862
+
1863
+ This bug causes Python before 3.4 to not reliably show warnings again
1864
+ after they have been ignored once (even within catch_warnings). It
1865
+ means that no "ignore" filter can be used easily, since following
1866
+ tests might need to see the warning. Additionally it allows easier
1867
+ specificity for testing warnings and can be nested.
1868
+
1869
+ Parameters
1870
+ ----------
1871
+ forwarding_rule : str, optional
1872
+ One of "always", "once", "module", or "location". Analogous to
1873
+ the usual warnings module filter mode, it is useful to reduce
1874
+ noise mostly on the outermost level. Unsuppressed and unrecorded
1875
+ warnings will be forwarded based on this rule. Defaults to "always".
1876
+ "location" is equivalent to the warnings "default", match by exact
1877
+ location the warning originated from.
1878
+
1879
+ Notes
1880
+ -----
1881
+ Filters added inside the context manager will be discarded again
1882
+ when leaving it. Upon entering all filters defined outside a
1883
+ context will be applied automatically.
1884
+
1885
+ When a recording filter is added, matching warnings are stored in the
1886
+ ``log`` attribute as well as in the list returned by ``record``.
1887
+
1888
+ If filters are added and the ``module`` keyword is given, the
1889
+ warning registry of this module will additionally be cleared when
1890
+ applying it, entering the context, or exiting it. This could cause
1891
+ warnings to appear a second time after leaving the context if they
1892
+ were configured to be printed once (default) and were already
1893
+ printed before the context was entered.
1894
+
1895
+ Nesting this context manager will work as expected when the
1896
+ forwarding rule is "always" (default). Unfiltered and unrecorded
1897
+ warnings will be passed out and be matched by the outer level.
1898
+ On the outermost level they will be printed (or caught by another
1899
+ warnings context). The forwarding rule argument can modify this
1900
+ behaviour.
1901
+
1902
+ Like ``catch_warnings`` this context manager is not threadsafe.
1903
+
1904
+ Examples
1905
+ --------
1906
+
1907
+ With a context manager::
1908
+
1909
+ with np.testing.suppress_warnings() as sup:
1910
+ sup.filter(DeprecationWarning, "Some text")
1911
+ sup.filter(module=np.ma.core)
1912
+ log = sup.record(FutureWarning, "Does this occur?")
1913
+ command_giving_warnings()
1914
+ # The FutureWarning was given once, the filtered warnings were
1915
+ # ignored. All other warnings abide outside settings (may be
1916
+ # printed/error)
1917
+ assert_(len(log) == 1)
1918
+ assert_(len(sup.log) == 1) # also stored in log attribute
1919
+
1920
+ Or as a decorator::
1921
+
1922
+ sup = np.testing.suppress_warnings()
1923
+ sup.filter(module=np.ma.core) # module must match exactly
1924
+ @sup
1925
+ def some_function():
1926
+ # do something which causes a warning in np.ma.core
1927
+ pass
1928
+ """
1929
+
1930
+ def __init__(self, forwarding_rule="always"):
1931
+ self._entered = False
1932
+
1933
+ # Suppressions are either instance-level or defined inside a single with-block:
1934
+ self._suppressions = []
1935
+
1936
+ if forwarding_rule not in {"always", "module", "once", "location"}:
1937
+ raise ValueError("unsupported forwarding rule.")
1938
+ self._forwarding_rule = forwarding_rule
1939
+
1940
+ def _clear_registries(self):
1941
+ if hasattr(warnings, "_filters_mutated"):
1942
+ # clearing the registry should not be necessary on new pythons,
1943
+ # instead the filters should be mutated.
1944
+ warnings._filters_mutated()
1945
+ return
1946
+ # Simply clear the registry, this should normally be harmless,
1947
+ # note that on new pythons it would be invalidated anyway.
1948
+ for module in self._tmp_modules:
1949
+ if hasattr(module, "__warningregistry__"):
1950
+ module.__warningregistry__.clear()
1951
+
1952
+ def _filter(self, category=Warning, message="", module=None, record=False):
1953
+ if record:
1954
+ record = [] # The log where to store warnings
1955
+ else:
1956
+ record = None
1957
+ if self._entered:
1958
+ if module is None:
1959
+ warnings.filterwarnings("always", category=category, message=message)
1960
+ else:
1961
+ module_regex = module.__name__.replace(".", r"\.") + "$"
1962
+ warnings.filterwarnings(
1963
+ "always", category=category, message=message, module=module_regex
1964
+ )
1965
+ self._tmp_modules.add(module)
1966
+ self._clear_registries()
1967
+
1968
+ self._tmp_suppressions.append(
1969
+ (category, message, re.compile(message, re.I), module, record)
1970
+ )
1971
+ else:
1972
+ self._suppressions.append(
1973
+ (category, message, re.compile(message, re.I), module, record)
1974
+ )
1975
+
1976
+ return record
1977
+
1978
+ def filter(self, category=Warning, message="", module=None):
1979
+ """
1980
+ Add a new suppressing filter or apply it if the state is entered.
1981
+
1982
+ Parameters
1983
+ ----------
1984
+ category : class, optional
1985
+ Warning class to filter
1986
+ message : string, optional
1987
+ Regular expression matching the warning message.
1988
+ module : module, optional
1989
+ Module to filter for. Note that the module (and its file)
1990
+ must match exactly and cannot be a submodule. This may make
1991
+ it unreliable for external modules.
1992
+
1993
+ Notes
1994
+ -----
1995
+ When added within a context, filters are only added inside
1996
+ the context and will be forgotten when the context is exited.
1997
+ """
1998
+ self._filter(category=category, message=message, module=module, record=False)
1999
+
2000
+ def record(self, category=Warning, message="", module=None):
2001
+ """
2002
+ Append a new recording filter or apply it if the state is entered.
2003
+
2004
+ All warnings matching will be appended to the ``log`` attribute.
2005
+
2006
+ Parameters
2007
+ ----------
2008
+ category : class, optional
2009
+ Warning class to filter
2010
+ message : string, optional
2011
+ Regular expression matching the warning message.
2012
+ module : module, optional
2013
+ Module to filter for. Note that the module (and its file)
2014
+ must match exactly and cannot be a submodule. This may make
2015
+ it unreliable for external modules.
2016
+
2017
+ Returns
2018
+ -------
2019
+ log : list
2020
+ A list which will be filled with all matched warnings.
2021
+
2022
+ Notes
2023
+ -----
2024
+ When added within a context, filters are only added inside
2025
+ the context and will be forgotten when the context is exited.
2026
+ """
2027
+ return self._filter(
2028
+ category=category, message=message, module=module, record=True
2029
+ )
2030
+
2031
+ def __enter__(self):
2032
+ if self._entered:
2033
+ raise RuntimeError("cannot enter suppress_warnings twice.")
2034
+
2035
+ self._orig_show = warnings.showwarning
2036
+ self._filters = warnings.filters
2037
+ warnings.filters = self._filters[:]
2038
+
2039
+ self._entered = True
2040
+ self._tmp_suppressions = []
2041
+ self._tmp_modules = set()
2042
+ self._forwarded = set()
2043
+
2044
+ self.log = [] # reset global log (no need to keep same list)
2045
+
2046
+ for cat, mess, _, mod, log in self._suppressions:
2047
+ if log is not None:
2048
+ del log[:] # clear the log
2049
+ if mod is None:
2050
+ warnings.filterwarnings("always", category=cat, message=mess)
2051
+ else:
2052
+ module_regex = mod.__name__.replace(".", r"\.") + "$"
2053
+ warnings.filterwarnings(
2054
+ "always", category=cat, message=mess, module=module_regex
2055
+ )
2056
+ self._tmp_modules.add(mod)
2057
+ warnings.showwarning = self._showwarning
2058
+ self._clear_registries()
2059
+
2060
+ return self
2061
+
2062
+ def __exit__(self, *exc_info):
2063
+ warnings.showwarning = self._orig_show
2064
+ warnings.filters = self._filters
2065
+ self._clear_registries()
2066
+ self._entered = False
2067
+ del self._orig_show
2068
+ del self._filters
2069
+
2070
+ def _showwarning(
2071
+ self, message, category, filename, lineno, *args, use_warnmsg=None, **kwargs
2072
+ ):
2073
+ for cat, _, pattern, mod, rec in (self._suppressions + self._tmp_suppressions)[
2074
+ ::-1
2075
+ ]:
2076
+ if issubclass(category, cat) and pattern.match(message.args[0]) is not None:
2077
+ if mod is None:
2078
+ # Message and category match, either recorded or ignored
2079
+ if rec is not None:
2080
+ msg = WarningMessage(
2081
+ message, category, filename, lineno, **kwargs
2082
+ )
2083
+ self.log.append(msg)
2084
+ rec.append(msg)
2085
+ return
2086
+ # Use startswith, because warnings strips the c or o from
2087
+ # .pyc/.pyo files.
2088
+ elif mod.__file__.startswith(filename):
2089
+ # The message and module (filename) match
2090
+ if rec is not None:
2091
+ msg = WarningMessage(
2092
+ message, category, filename, lineno, **kwargs
2093
+ )
2094
+ self.log.append(msg)
2095
+ rec.append(msg)
2096
+ return
2097
+
2098
+ # There is no filter in place, so pass to the outside handler
2099
+ # unless we should only pass it once
2100
+ if self._forwarding_rule == "always":
2101
+ if use_warnmsg is None:
2102
+ self._orig_show(message, category, filename, lineno, *args, **kwargs)
2103
+ else:
2104
+ self._orig_showmsg(use_warnmsg)
2105
+ return
2106
+
2107
+ if self._forwarding_rule == "once":
2108
+ signature = (message.args, category)
2109
+ elif self._forwarding_rule == "module":
2110
+ signature = (message.args, category, filename)
2111
+ elif self._forwarding_rule == "location":
2112
+ signature = (message.args, category, filename, lineno)
2113
+
2114
+ if signature in self._forwarded:
2115
+ return
2116
+ self._forwarded.add(signature)
2117
+ if use_warnmsg is None:
2118
+ self._orig_show(message, category, filename, lineno, *args, **kwargs)
2119
+ else:
2120
+ self._orig_showmsg(use_warnmsg)
2121
+
2122
+ def __call__(self, func):
2123
+ """
2124
+ Function decorator to apply certain suppressions to a whole
2125
+ function.
2126
+ """
2127
+
2128
+ @wraps(func)
2129
+ def new_func(*args, **kwargs):
2130
+ with self:
2131
+ return func(*args, **kwargs)
2132
+
2133
+ return new_func
2134
+
2135
+
2136
+ @contextlib.contextmanager
2137
+ def _assert_no_gc_cycles_context(name=None):
2138
+ __tracebackhide__ = True # Hide traceback for py.test
2139
+
2140
+ # not meaningful to test if there is no refcounting
2141
+ if not HAS_REFCOUNT:
2142
+ yield
2143
+ return
2144
+
2145
+ assert_(gc.isenabled())
2146
+ gc.disable()
2147
+ gc_debug = gc.get_debug()
2148
+ try:
2149
+ for i in range(100):
2150
+ if gc.collect() == 0:
2151
+ break
2152
+ else:
2153
+ raise RuntimeError(
2154
+ "Unable to fully collect garbage - perhaps a __del__ method "
2155
+ "is creating more reference cycles?"
2156
+ )
2157
+
2158
+ gc.set_debug(gc.DEBUG_SAVEALL)
2159
+ yield
2160
+ # gc.collect returns the number of unreachable objects in cycles that
2161
+ # were found -- we are checking that no cycles were created in the context
2162
+ n_objects_in_cycles = gc.collect()
2163
+ objects_in_cycles = gc.garbage[:]
2164
+ finally:
2165
+ del gc.garbage[:]
2166
+ gc.set_debug(gc_debug)
2167
+ gc.enable()
2168
+
2169
+ if n_objects_in_cycles:
2170
+ name_str = f" when calling {name}" if name is not None else ""
2171
+ raise AssertionError(
2172
+ "Reference cycles were found{}: {} objects were collected, "
2173
+ "of which {} are shown below:{}".format(
2174
+ name_str,
2175
+ n_objects_in_cycles,
2176
+ len(objects_in_cycles),
2177
+ "".join(
2178
+ "\n {} object with id={}:\n {}".format(
2179
+ type(o).__name__,
2180
+ id(o),
2181
+ pprint.pformat(o).replace("\n", "\n "),
2182
+ )
2183
+ for o in objects_in_cycles
2184
+ ),
2185
+ )
2186
+ )
2187
+
2188
+
2189
+ def assert_no_gc_cycles(*args, **kwargs):
2190
+ """
2191
+ Fail if the given callable produces any reference cycles.
2192
+
2193
+ If called with all arguments omitted, may be used as a context manager:
2194
+
2195
+ with assert_no_gc_cycles():
2196
+ do_something()
2197
+
2198
+ .. versionadded:: 1.15.0
2199
+
2200
+ Parameters
2201
+ ----------
2202
+ func : callable
2203
+ The callable to test.
2204
+ \\*args : Arguments
2205
+ Arguments passed to `func`.
2206
+ \\*\\*kwargs : Kwargs
2207
+ Keyword arguments passed to `func`.
2208
+
2209
+ Returns
2210
+ -------
2211
+ Nothing. The result is deliberately discarded to ensure that all cycles
2212
+ are found.
2213
+
2214
+ """
2215
+ if not args:
2216
+ return _assert_no_gc_cycles_context()
2217
+
2218
+ func = args[0]
2219
+ args = args[1:]
2220
+ with _assert_no_gc_cycles_context(name=func.__name__):
2221
+ func(*args, **kwargs)
2222
+
2223
+
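+ # Illustrative sketch (editor's example, not part of the upstream file): on
+ # CPython, a self-referencing list is a reference cycle, so the context
+ # manager raises.
+ #
+ # def _make_cycle():
+ #     cycle = []
+ #     cycle.append(cycle)   # the only reference now lives inside the cycle
+ #
+ # try:
+ #     with assert_no_gc_cycles():
+ #         _make_cycle()
+ # except AssertionError:
+ #     pass                  # expected: the cyclic object is reported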
2224
+ def break_cycles():
2225
+ """
2226
+ Break reference cycles by calling gc.collect
2227
+ Objects can call other objects' methods (for instance, another object's
2228
+ __del__) inside their own __del__. On PyPy, the interpreter only runs
2229
+ between calls to gc.collect, so multiple calls are needed to completely
2230
+ release all cycles.
2231
+ """
2232
+
2233
+ gc.collect()
2234
+ if IS_PYPY:
2235
+ # a few more, just to make sure all the finalizers are called
2236
+ gc.collect()
2237
+ gc.collect()
2238
+ gc.collect()
2239
+ gc.collect()
2240
+
2241
+
2242
+ def requires_memory(free_bytes):
2243
+ """Decorator to skip a test if not enough memory is available"""
2244
+ import pytest
2245
+
2246
+ def decorator(func):
2247
+ @wraps(func)
2248
+ def wrapper(*a, **kw):
2249
+ msg = check_free_memory(free_bytes)
2250
+ if msg is not None:
2251
+ pytest.skip(msg)
2252
+
2253
+ try:
2254
+ return func(*a, **kw)
2255
+ except MemoryError:
2256
+ # Probably ran out of memory regardless: don't regard as failure
2257
+ pytest.xfail("MemoryError raised")
2258
+
2259
+ return wrapper
2260
+
2261
+ return decorator
2262
+
2263
+
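+ # Illustrative usage (editor's example, not part of the upstream file):
+ #
+ # @requires_memory(free_bytes=8 * 1000**3)   # skip unless ~8 GB look free
+ # def test_large_broadcast():
+ #     ...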
2264
+ def check_free_memory(free_bytes):
2265
+ """
2266
+ Check whether `free_bytes` amount of memory is currently free.
2267
+ Returns: None if enough memory available, otherwise error message
2268
+ """
2269
+ env_var = "NPY_AVAILABLE_MEM"
2270
+ env_value = os.environ.get(env_var)
2271
+ if env_value is not None:
2272
+ try:
2273
+ mem_free = _parse_size(env_value)
2274
+ except ValueError as exc:
2275
+ raise ValueError( # noqa: B904
2276
+ f"Invalid environment variable {env_var}: {exc}"
2277
+ )
2278
+
2279
+ msg = (
2280
+ f"{free_bytes/1e9} GB memory required, but environment variable "
2281
+ f"NPY_AVAILABLE_MEM={env_value} set"
2282
+ )
2283
+ else:
2284
+ mem_free = _get_mem_available()
2285
+
2286
+ if mem_free is None:
2287
+ msg = (
2288
+ "Could not determine available memory; set NPY_AVAILABLE_MEM "
2289
+ "environment variable (e.g. NPY_AVAILABLE_MEM=16GB) to run "
2290
+ "the test."
2291
+ )
2292
+ mem_free = -1
2293
+ else:
2294
+ msg = (
2295
+ f"{free_bytes/1e9} GB memory required, but {mem_free/1e9} GB available"
2296
+ )
2297
+
2298
+ return msg if mem_free < free_bytes else None
2299
+
2300
+
2301
+ def _parse_size(size_str):
2302
+ """Convert memory size strings ('12 GB' etc.) to float"""
2303
+ suffixes = {
2304
+ "": 1,
2305
+ "b": 1,
2306
+ "k": 1000,
2307
+ "m": 1000**2,
2308
+ "g": 1000**3,
2309
+ "t": 1000**4,
2310
+ "kb": 1000,
2311
+ "mb": 1000**2,
2312
+ "gb": 1000**3,
2313
+ "tb": 1000**4,
2314
+ "kib": 1024,
2315
+ "mib": 1024**2,
2316
+ "gib": 1024**3,
2317
+ "tib": 1024**4,
2318
+ }
2319
+
2320
+ size_re = re.compile(
2321
+ r"^\s*(\d+|\d+\.\d+)\s*({})\s*$".format("|".join(suffixes.keys())), re.I
2322
+ )
2323
+
2324
+ m = size_re.match(size_str.lower())
2325
+ if not m or m.group(2) not in suffixes:
2326
+ raise ValueError(f"value {size_str!r} not a valid size")
2327
+ return int(float(m.group(1)) * suffixes[m.group(2)])
2328
+
2329
+
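+ # Illustrative sketch (editor's example, not part of the upstream file):
+ # suffixes are case-insensitive, bare numbers mean bytes, and decimal
+ # (kB/MB/GB) vs. binary (KiB/MiB/GiB) prefixes scale differently.
+ #
+ # assert _parse_size("12 GB") == 12 * 1000**3
+ # assert _parse_size("1.5GiB") == int(1.5 * 1024**3)
+ # assert _parse_size("4096") == 4096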
2330
+ def _get_mem_available():
2331
+ """Return available memory in bytes, or None if unknown."""
2332
+ try:
2333
+ import psutil
2334
+
2335
+ return psutil.virtual_memory().available
2336
+ except (ImportError, AttributeError):
2337
+ pass
2338
+
2339
+ if sys.platform.startswith("linux"):
2340
+ info = {}
2341
+ with open("/proc/meminfo") as f:
2342
+ for line in f:
2343
+ p = line.split()
2344
+ info[p[0].strip(":").lower()] = int(p[1]) * 1024
2345
+
2346
+ if "memavailable" in info:
2347
+ # Linux >= 3.14
2348
+ return info["memavailable"]
2349
+ else:
2350
+ return info["memfree"] + info["cached"]
2351
+
2352
+ return None
2353
+
2354
+
2355
+ def _no_tracing(func):
2356
+ """
2357
+ Decorator to temporarily turn off tracing for the duration of a test.
2358
+ Needed in tests that check refcounting, otherwise the tracing itself
2359
+ influences the refcounts
2360
+ """
2361
+ if not hasattr(sys, "gettrace"):
2362
+ return func
2363
+ else:
2364
+
2365
+ @wraps(func)
2366
+ def wrapper(*args, **kwargs):
2367
+ original_trace = sys.gettrace()
2368
+ try:
2369
+ sys.settrace(None)
2370
+ return func(*args, **kwargs)
2371
+ finally:
2372
+ sys.settrace(original_trace)
2373
+
2374
+ return wrapper
2375
+
2376
+
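+ # Illustrative usage (editor's example, not part of the upstream file): keep a
+ # coverage or debugger trace hook from perturbing sys.getrefcount() readings.
+ #
+ # @_no_tracing
+ # def test_refcount_stable():
+ #     ...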
2377
+ def _get_glibc_version():
2378
+ try:
2379
+ ver = os.confstr("CS_GNU_LIBC_VERSION").rsplit(" ")[1]
2380
+ except Exception:
2381
+ ver = "0.0"
2382
+
2383
+ return ver
2384
+
2385
+
2386
+ _glibcver = _get_glibc_version()
2387
+
2388
+
2389
+ def _glibc_older_than(x):
2390
+ return _glibcver != "0.0" and _glibcver < x
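+
+ # Illustrative usage (editor's example, not part of the upstream file): gate a
+ # test on the detected glibc version, e.g. with pytest. Note the check is a
+ # plain string comparison.
+ #
+ # import pytest
+ #
+ # @pytest.mark.skipif(_glibc_older_than("2.17"), reason="needs glibc >= 2.17")
+ # def test_libm_edge_case():
+ #     ...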
parrot/lib/python3.10/site-packages/torch/_strobelight/__init__.py ADDED
File without changes
parrot/lib/python3.10/site-packages/torch/_strobelight/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (170 Bytes).
parrot/lib/python3.10/site-packages/torch/_strobelight/__pycache__/cli_function_profiler.cpython-310.pyc ADDED
Binary file (9.02 kB).
parrot/lib/python3.10/site-packages/torch/_strobelight/__pycache__/compile_time_profiler.cpython-310.pyc ADDED
Binary file (5.18 kB).
parrot/lib/python3.10/site-packages/torch/_strobelight/cli_function_profiler.py ADDED
@@ -0,0 +1,311 @@
1
+ # mypy: disallow-untyped-defs
2
+
3
+ import functools
4
+ import logging
5
+ import os
6
+ import re
7
+ import subprocess
8
+ import time
9
+ from threading import Lock
10
+ from timeit import default_timer as timer
11
+ from typing import Any, List, Optional, Sequence
12
+
13
+
14
+ logger = logging.getLogger("strobelight_function_profiler")
15
+
16
+ console_handler = logging.StreamHandler()
17
+ formatter = logging.Formatter(
18
+ "%(name)s, line %(lineno)d, %(asctime)s, %(levelname)s: %(message)s"
19
+ )
20
+ console_handler.setFormatter(formatter)
21
+
22
+ logger.addHandler(console_handler)
23
+ logger.setLevel(logging.INFO)
24
+ logger.propagate = False
25
+
26
+
27
+ class StrobelightCLIProfilerError(Exception):
28
+ """
29
+ Raised when an error happens during strobelight profiling
30
+ """
31
+
32
+
33
+ def _pid_namespace_link(pid: Optional[int] = None) -> str:
34
+ """Returns the link to the process's namespace, example: pid:[4026531836]"""
35
+ PID_NAMESPACE_PATH = "/proc/{}/ns/pid"
36
+ pid = pid or os.getpid()
37
+ return os.readlink(PID_NAMESPACE_PATH.format(pid))
38
+
39
+
40
+ def _pid_namespace(pid: Optional[int] = None) -> int:
41
+ """Returns the process's namespace id"""
42
+ pid = pid or os.getpid()
43
+ link = _pid_namespace_link(pid)
44
+ return int(link[link.find("[") + 1 : -1])
45
+
46
+
47
+ def _command_to_string(command: Sequence[str]) -> str:
48
+ return " ".join(command)
49
+
50
+
51
+ class StrobelightCLIFunctionProfiler:
52
+ """
53
+ Note: this is a Meta only tool.
54
+
55
+ StrobelightCLIFunctionProfiler can be used to profile a python function and
56
+ generate a strobelight link with the results. It works on Meta servers but
57
+ does not require an fbcode target.
58
+ When stop_at_error is False (default), errors during profiling do not prevent
59
+ the work function from running.
60
+
61
+ Check function_profiler_example.py for an example.
62
+ """
63
+
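+ # Illustrative usage (editor's sketch; see function_profiler_example.py for
+ # the real example). Assumes Meta infra with `strobeclient` on PATH;
+ # `train_step` and `batch` are hypothetical.
+ #
+ # profiler = StrobelightCLIFunctionProfiler(sample_each=1e7)
+ # result = profiler.profile(train_step, batch)  # runs train_step under strobelight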
64
+ # This lock is used to make sure only one thread is running the profiler at any point.
65
+ _lock = Lock()
66
+
67
+ def __init__(
68
+ self,
69
+ *,
70
+ stop_at_error: bool = False,
71
+ max_profile_duration_sec: int = 60 * 10,
72
+ sample_each: float = 1e7, # sample once every sample_each cycles.
73
+ run_user_name: str = "pytorch-strobelight-ondemand",
74
+ timeout_wait_for_running_sec: int = 60,
75
+ timeout_wait_for_finished_sec: int = 60,
76
+ recorded_env_variables: Optional[List[str]] = None,
77
+ sample_tags: Optional[List[str]] = None,
78
+ stack_max_len: int = 127,
79
+ async_stack_max_len: int = 127,
80
+ ):
81
+ self.stop_at_error = stop_at_error
82
+ self.max_profile_duration_sec = max_profile_duration_sec
83
+ self.sample_each = sample_each
84
+ self.run_user_name = run_user_name
85
+ self.timeout_wait_for_running_sec = timeout_wait_for_running_sec
86
+ self.timeout_wait_for_finished_sec = timeout_wait_for_finished_sec
87
+ # Results of the most recent run.
88
+ # Tracks the strobelight run id of the most recent run
89
+ self.current_run_id: Optional[int] = None
90
+ self.profile_result: Optional[List[str]] = None
91
+ self.sample_tags = sample_tags
92
+
93
+ def _run_async(self) -> None:
94
+ processId = os.getpid()
95
+ namespace = _pid_namespace(processId)
96
+ command = [
97
+ "strobeclient",
98
+ "run",
99
+ "--profiler",
100
+ "pyperf",
101
+ "--event",
102
+ "cycles",
103
+ "--async",
104
+ "--sample-interval",
105
+ f"{int(self.sample_each)}",
106
+ "--duration-ms",
107
+ f"{int(self.max_profile_duration_sec * 1000)}",
108
+ "--pid",
109
+ f"{namespace}:{processId}",
110
+ ]
111
+
112
+ if self.sample_tags:
113
+ command.append("--sample-tags")
114
+ command.append(",".join(self.sample_tags))
115
+
116
+ logger.debug("running command: %s", _command_to_string(command))
117
+ result = subprocess.run(command, capture_output=True)
118
+ output = result.stderr.decode("utf-8")
119
+ logger.debug("output:\n{%s}", output)
120
+
121
+ if result.returncode != 0:
122
+ raise StrobelightCLIProfilerError(
123
+ f"failed to start strobelight profiling, error in run_async:{output}"
124
+ )
125
+
126
+ if match := re.search(r"INFO Run Id: (-?\d+)", output):
127
+ self.current_run_id = int(match.group(1))
128
+ return
129
+
130
+ raise StrobelightCLIProfilerError(
131
+ f"failed to start strobelight profiling, unexpected result {output}"
132
+ )
133
+
134
+ def _wait_for_running(self, counter: int = 0) -> None:
135
+ if counter > 20:
136
+ raise StrobelightCLIProfilerError(
137
+ "wait_for_running called more than 20 times"
138
+ )
139
+
140
+ command = ["strobeclient", "getRunStatus", "--run-id", f"{self.current_run_id}"]
141
+ logger.debug("running command: %s", _command_to_string(command))
142
+ result = subprocess.run(command, capture_output=True)
143
+ output = result.stderr.decode("utf-8")
144
+ logger.debug("output:\n{%s}", output)
145
+
146
+ if result.returncode != 0:
147
+ raise StrobelightCLIProfilerError(
148
+ f"failed to start strobelight profiling, error in wait_for_running:{output}"
149
+ )
150
+
151
+ if match := re.search("Profile run status: (.*)", output):
152
+ current_status = match.group(1)
153
+ if current_status == "RUNNING":
154
+ return
155
+ elif current_status == "PREPARING":
156
+ time.sleep(10)
157
+ self._wait_for_running(counter + 1)
158
+ return
159
+ else:
160
+ raise StrobelightCLIProfilerError(f"unexpected {current_status} phase")
161
+
162
+ raise StrobelightCLIProfilerError(f"unexpected output:\n{output}")
163
+
164
+ def _stop_run(self) -> None:
165
+ command = ["strobeclient", "stopRun", "--run-id", str(self.current_run_id)]
166
+ logger.debug("running command: %s", _command_to_string(command))
167
+ result = subprocess.run(command, capture_output=True)
168
+ output = result.stderr.decode("utf-8")
169
+ logger.debug("output:\n{%s}", output)
170
+
171
+ if result.returncode != 0:
172
+ raise StrobelightCLIProfilerError(
173
+ f"failed to stop strobelight profiling, return code is not 0 :{output}"
174
+ )
175
+
176
+ if match := re.search("INFO ::1:(.*)", output):
177
+ current_status = match.group(1)
178
+ if current_status.__contains__("Success!"):
179
+ return
180
+ else:
181
+ raise StrobelightCLIProfilerError(
182
+ f"failed to stop strobelight profiling, got {current_status} result"
183
+ )
184
+
185
+ raise StrobelightCLIProfilerError(f"unexpected output\n: {output} ")
186
+
187
+ def _get_results(self) -> None:
188
+ command = ["strobeclient", "getRunStatus", "--run-id", str(self.current_run_id)]
189
+ logger.debug("running command: %s", _command_to_string(command))
190
+ result = subprocess.run(command, capture_output=True)
191
+ output = result.stderr.decode("utf-8")
192
+ logger.debug("output:\n{%s}", output)
193
+
194
+ if result.returncode != 0:
195
+ raise StrobelightCLIProfilerError(
196
+ f"failed to extract profiling results, return code is not 0 : {output}"
197
+ )
198
+
199
+ if match := re.search("INFO ::1:(.*)", output):
200
+ current_status = match.group(1)
201
+ if current_status.__contains__("Profile run status: PROCESSING"):
202
+ time.sleep(10)
203
+ self._get_results()
204
+ return
205
+ elif not current_status.__contains__("Profile run finished with SUCCESS"):
206
+ raise StrobelightCLIProfilerError(
207
+ f"failed to extract profiling results, unexpected response {output}"
208
+ )
209
+
210
+ self.profile_result = []
211
+ for item in re.findall(
212
+ r"(Total samples(.*)|GraphProfiler(.*)|Icicle view \(python stack\)(.*))",
213
+ output,
214
+ ):
215
+ self.profile_result.append(item[0])
216
+ logger.info(item[0])
217
+
218
+ def _stop_strobelight_no_throw(
219
+ self,
220
+ collect_results: bool,
221
+ ) -> None:
222
+ try:
223
+ # call stop run
224
+ self._stop_run()
225
+ logger.info("strobelight profiling stopped")
226
+
227
+ logger.debug("collection stopped")
228
+
229
+ if not collect_results:
230
+ return
231
+
232
+ self._get_results()
233
+ except Exception:
234
+ logger.warning("error during stop_strobelight", exc_info=True)
235
+
236
+ # Returns True if strobelight started and is running; never throws.
237
+ def _start_strobelight(self) -> bool:
238
+ strobelight_started = False
239
+ try:
240
+ self._run_async()
241
+ strobelight_started = True
242
+ logger.info("strobelight run id is: %s", self.current_run_id)
243
+ self._wait_for_running()
244
+ logger.info("strobelight profiling running")
245
+ return True
246
+
247
+ except Exception:
248
+ logger.warning("error during start_strobelight:", exc_info=True)
249
+ if strobelight_started:
250
+ self._stop_strobelight_no_throw(collect_results=False)
251
+ return False
252
+
253
+ def profile(self, work_function: Any, *args: Any, **kwargs: Any) -> Any:
254
+ self.current_run_id = None
255
+ self.profile_result = None
256
+
257
+ if not StrobelightCLIFunctionProfiler._lock.acquire(False):
259
+ if self.stop_at_error:
260
+ raise StrobelightCLIProfilerError("concurrent runs not supported")
261
+
262
+ logger.warning("concurrent runs not supported")
263
+ return work_function(*args, **kwargs)
264
+
265
+ started = self._start_strobelight()
266
+ if not started:
267
+ if self.stop_at_error:
268
+ StrobelightCLIFunctionProfiler._lock.release()
269
+ raise StrobelightCLIProfilerError(
270
+ "failed to start strobelight profiling"
271
+ )
272
+ result = work_function(*args, **kwargs)
273
+ StrobelightCLIFunctionProfiler._lock.release()
274
+ return result
275
+
276
+ try:
277
+ logger.debug("collection started")
278
+ start = timer()
279
+ result = work_function(*args, **kwargs)
280
+ end = timer()
281
+ total_time = end - start # Time in seconds, e.g. 5.38091952400282
282
+ logger.info("work function took %s seconds", total_time)
283
+ self._stop_strobelight_no_throw(collect_results=True)
284
+ StrobelightCLIFunctionProfiler._lock.release()
285
+ return result
286
+ except Exception:
287
+ logger.warning("work function throw exception", exc_info=True)
288
+ self._stop_strobelight_no_throw(collect_results=False)
289
+ StrobelightCLIFunctionProfiler._lock.release()
290
+ raise
291
+
292
+
293
+ # A function decorator that wraps profile; if no profiler is provided, one with
294
+ # default args is created. A function can be annotated as:
295
+ # @strobelight()
296
+ # @strobelight(profiler=StrobelightCLIFunctionProfiler(stop_at_error=True, ...))
297
+ # @strobelight(stop_at_error=True,...)
298
+ def strobelight(
299
+ profiler: Optional[StrobelightCLIFunctionProfiler] = None, **kwargs: Any
300
+ ) -> Any:
301
+ if not profiler:
302
+ profiler = StrobelightCLIFunctionProfiler(**kwargs)
303
+
304
+ def strobelight_inner(work_function: Any) -> Any:
305
+ @functools.wraps(work_function)
306
+ def wrapper_function(*args: Any, **kwargs: Any) -> Any:
307
+ return profiler.profile(work_function, *args, **kwargs)
308
+
309
+ return wrapper_function
310
+
311
+ return strobelight_inner
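For orientation, the following is a minimal usage sketch of the profiler and decorator above. It assumes a host where strobeclient is available; heavy_work and its argument are illustrative names, not part of the module:

    from torch._strobelight.cli_function_profiler import (
        StrobelightCLIFunctionProfiler,
        strobelight,
    )

    # Decorator form; keyword arguments are forwarded to StrobelightCLIFunctionProfiler.
    @strobelight(sample_each=1e7, stop_at_error=False)
    def heavy_work(n: int) -> int:
        return sum(i * i for i in range(n))

    heavy_work(10_000_000)  # this call is profiled

    # Direct form: profile any callable without decorating it.
    profiler = StrobelightCLIFunctionProfiler(max_profile_duration_sec=60)
    result = profiler.profile(sorted, range(1_000_000), reverse=True)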
parrot/lib/python3.10/site-packages/torch/_strobelight/compile_time_profiler.py ADDED
@@ -0,0 +1,156 @@
1
+ # mypy: disallow-untyped-defs
2
+
3
+ import logging
4
+ import os
5
+
6
+ from datetime import datetime
7
+ from socket import gethostname
8
+ from typing import Any, Optional
9
+
10
+ from torch._strobelight.cli_function_profiler import StrobelightCLIFunctionProfiler
11
+
12
+ logger = logging.getLogger("strobelight_compile_time_profiler")
13
+
14
+ console_handler = logging.StreamHandler()
15
+ formatter = logging.Formatter(
16
+ "%(name)s, line %(lineno)d, %(asctime)s, %(levelname)s: %(message)s"
17
+ )
18
+ console_handler.setFormatter(formatter)
19
+
20
+ logger.addHandler(console_handler)
21
+ logger.setLevel(logging.INFO)
22
+ logger.propagate = False
23
+
24
+
25
+ class StrobelightCompileTimeProfiler:
26
+ success_profile_count: int = 0
27
+ failed_profile_count: int = 0
28
+ ignored_profile_runs: int = 0
29
+ inside_profile_compile_time: bool = False
30
+ enabled: bool = False
31
+ # A unique identifier that is used as a sample tag in the strobelight profile to
32
+ # associate all compile time profiles together.
33
+ identifier: Optional[str] = None
34
+
35
+ current_phase: Optional[str] = None
36
+
37
+ profiler: Optional[Any] = None
38
+
39
+ max_stack_length: int = int(
40
+ os.environ.get("COMPILE_STROBELIGHT_MAX_STACK_LENGTH", 127)
41
+ )
42
+ max_profile_time: int = int(
43
+ os.environ.get("COMPILE_STROBELIGHT_MAX_PROFILE_TIME", 60 * 30)
44
+ )
45
+ # Collect one sample every sample_each cycles.
46
+ sample_each: int = int(
47
+ float(os.environ.get("COMPILE_STROBELIGHT_SAMPLE_RATE", 1e7))
48
+ )
49
+
50
+ @classmethod
51
+ def enable(cls, profiler_class: Any = StrobelightCLIFunctionProfiler) -> None:
52
+ if cls.enabled:
53
+ logger.info("compile time strobelight profiling already enabled")
54
+ return
55
+
56
+ logger.info("compile time strobelight profiling enabled")
57
+
58
+ if profiler_class is StrobelightCLIFunctionProfiler:
59
+ import shutil
60
+
61
+ if not shutil.which("strobeclient"):
62
+ logger.info(
63
+ "strobeclient not found, cant enable compile time strobelight profiling, seems"
64
+ "like you are not on a FB machine."
65
+ )
66
+ return
67
+
68
+ cls.enabled = True
69
+ cls._cls_init()
70
+ # profiler_class should have a public API similar to that of StrobelightCLIFunctionProfiler.
71
+ # We pass a different function profiler class for Meta-internal fbcode targets.
72
+ cls.profiler = profiler_class(
73
+ sample_each=cls.sample_each,
74
+ max_profile_duration_sec=cls.max_profile_time,
75
+ stack_max_len=cls.max_stack_length,
76
+ async_stack_max_len=cls.max_stack_length,
77
+ run_user_name="pt2-profiler/"
78
+ + os.environ.get("USER", os.environ.get("USERNAME", "")),
79
+ sample_tags=[cls.identifier],  # match the Optional[List[str]] annotation
80
+ )
81
+
82
+ @classmethod
83
+ def _cls_init(cls) -> None:
84
+ cls.identifier = "{date}{pid}{hostname}".format(
85
+ date=datetime.now().strftime("%Y-%m-%d-%H:%M:%S"),
86
+ pid=os.getpid(),
87
+ hostname=gethostname(),
88
+ )
89
+
90
+ logger.info("Unique sample tag for this run is: %s", cls.identifier)
91
+ logger.info(
92
+ "You can use the following link to access the strobelight profile at the end of the run: %s",
93
+ (
94
+ "https://www.internalfb.com/intern/scuba/query/?dataset=pyperf_experime"
95
+ "ntal%2Fon_demand&drillstate=%7B%22purposes%22%3A[]%2C%22end%22%3A%22no"
96
+ "w%22%2C%22start%22%3A%22-30%20days%22%2C%22filterMode%22%3A%22DEFAULT%"
97
+ "22%2C%22modifiers%22%3A[]%2C%22sampleCols%22%3A[]%2C%22cols%22%3A[%22n"
98
+ "amespace_id%22%2C%22namespace_process_id%22]%2C%22derivedCols%22%3A[]%"
99
+ "2C%22mappedCols%22%3A[]%2C%22enumCols%22%3A[]%2C%22return_remainder%22"
100
+ "%3Afalse%2C%22should_pivot%22%3Afalse%2C%22is_timeseries%22%3Afalse%2C"
101
+ "%22hideEmptyColumns%22%3Afalse%2C%22timezone%22%3A%22America%2FLos_Ang"
102
+ "eles%22%2C%22compare%22%3A%22none%22%2C%22samplingRatio%22%3A%221%22%2"
103
+ "C%22metric%22%3A%22count%22%2C%22aggregation_field%22%3A%22async_stack"
104
+ "_complete%22%2C%22top%22%3A10000%2C%22aggregateList%22%3A[]%2C%22param"
105
+ "_dimensions%22%3A[%7B%22dim%22%3A%22py_async_stack%22%2C%22op%22%3A%22"
106
+ "edge%22%2C%22param%22%3A%220%22%2C%22anchor%22%3A%220%22%7D]%2C%22orde"
107
+ "r%22%3A%22weight%22%2C%22order_desc%22%3Atrue%2C%22constraints%22%3A[["
108
+ "%7B%22column%22%3A%22sample_tags%22%2C%22op%22%3A%22all%22%2C%22value%"
109
+ f"22%3A[%22[%5C%22{cls.identifier}%5C%22]%22]%7D]]%2C%22c_constraints%22%3A[[]]%2C%22b"
110
+ "_constraints%22%3A[[]]%2C%22ignoreGroupByInComparison%22%3Afalse%7D&vi"
111
+ "ew=GraphProfilerView&&normalized=1712358002&pool=uber"
112
+ ),
113
+ )
114
+
115
+ @classmethod
116
+ def _log_stats(cls) -> None:
117
+ logger.info(
118
+ "%s strobelight success runs out of %s non-recursive compilation events.",
119
+ cls.success_profile_count,
120
+ cls.success_profile_count + cls.failed_profile_count,
121
+ )
122
+
123
+ # TODO: use thread-level metadata tags to record phases.
124
+ @classmethod
125
+ def profile_compile_time(
126
+ cls, func: Any, phase_name: str, *args: Any, **kwargs: Any
127
+ ) -> Any:
128
+ if not cls.enabled:
129
+ return func(*args, **kwargs)
130
+
131
+ if cls.profiler is None:
132
+ logger.error("profiler is not set")
133
+ return func(*args, **kwargs)  # do not silently drop the caller's work
134
+
135
+ if cls.inside_profile_compile_time:
136
+ cls.ignored_profile_runs += 1
137
+ logger.info(
138
+ "profile_compile_time is requested for phase: %s while already in running phase: %s, recursive call ignored",
139
+ phase_name,
140
+ cls.current_phase,
141
+ )
142
+ return func(*args, **kwargs)
143
+
144
+ cls.inside_profile_compile_time = True
145
+ cls.current_phase = phase_name
146
+
147
+ work_result = cls.profiler.profile(func, *args, **kwargs)
148
+
149
+ if cls.profiler.profile_result is not None:
150
+ cls.success_profile_count += 1
151
+ else:
152
+ cls.failed_profile_count += 1
153
+
154
+ cls._log_stats()
155
+ cls.inside_profile_compile_time = False
156
+ return work_result
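For context, here is a minimal sketch of driving StrobelightCompileTimeProfiler directly; in normal use it is invoked from PyTorch's compile pipeline rather than by hand. It assumes a Meta-internal host with strobeclient; the work function is illustrative, and the COMPILE_STROBELIGHT_* variables are read at class-definition time above, so they must be set before the module is imported:

    import os

    # Must be set before torch._strobelight.compile_time_profiler is imported.
    os.environ["COMPILE_STROBELIGHT_MAX_PROFILE_TIME"] = str(60 * 30)
    os.environ["COMPILE_STROBELIGHT_SAMPLE_RATE"] = str(1e7)

    import torch
    from torch._strobelight.compile_time_profiler import (
        StrobelightCompileTimeProfiler,
    )

    StrobelightCompileTimeProfiler.enable()

    def work(x):
        return (x * x).sum()

    # Profile one phase; recursive calls from inside a running phase are ignored.
    StrobelightCompileTimeProfiler.profile_compile_time(
        work, "example_phase", torch.randn(8)
    )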
parrot/lib/python3.10/site-packages/torch/mtia/__init__.py ADDED
@@ -0,0 +1,263 @@
1
+ # mypy: allow-untyped-defs
2
+ r"""
3
+ This package enables an interface for accessing the MTIA backend in Python.
4
+ """
5
+
6
+ import threading
7
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
8
+
9
+ import torch
10
+
11
+ from torch.types import Device
12
+
13
+ from .. import device as _device, Tensor
14
+ from .._utils import _dummy_type, _LazySeedTracker, classproperty
15
+ from ._utils import _get_device_index
16
+
17
+ _device_t = Union[_device, str, int, None]
18
+
19
+ # torch.mtia.Event/Stream is alias of torch.Event/Stream
20
+ Event = torch.Event
21
+ Stream = torch.Stream
22
+
23
+ _initialized = False
24
+ _queued_calls: List[
25
+ Tuple[Callable[[], None], List[str]]
26
+ ] = [] # don't invoke these until initialization occurs
27
+ _tls = threading.local()
28
+ _initialization_lock = threading.Lock()
29
+ _lazy_seed_tracker = _LazySeedTracker()
30
+
31
+
32
+ def init():
33
+ _lazy_init()
34
+
35
+
36
+ def is_initialized():
37
+ r"""Return whether PyTorch's MTIA state has been initialized."""
38
+ return _initialized and not _is_in_bad_fork()
39
+
40
+
41
+ def _is_in_bad_fork() -> bool:
42
+ return torch._C._mtia_isInBadFork()
43
+
44
+
45
+ def _lazy_init() -> None:
46
+ global _initialized, _queued_calls
47
+ if is_initialized() or hasattr(_tls, "is_initializing"):
48
+ return
49
+ with _initialization_lock:
50
+ # We be double-checked locking, boys! This is OK because
51
+ # the above test was GIL protected anyway. The inner test
52
+ # is for when a thread blocked on some other thread which was
53
+ # doing the initialization; when they get the lock, they will
54
+ # find there is nothing left to do.
55
+ if is_initialized():
56
+ return
57
+ # It is important to prevent other threads from entering _lazy_init
58
+ # immediately, while we are still guaranteed to have the GIL, because some
59
+ # of the C calls we make below will release the GIL
60
+ if _is_in_bad_fork():
61
+ raise RuntimeError(
62
+ "Cannot re-initialize MTIA in forked subprocess. To use MTIA with "
63
+ "multiprocessing, you must use the 'spawn' start method"
64
+ )
65
+ if not _is_compiled():
66
+ raise AssertionError("Torch not compiled with MTIA enabled")
67
+
68
+ torch._C._mtia_init()
69
+ # Some of the queued calls may reentrantly call _lazy_init();
70
+ # we need to just return without initializing in that case.
71
+ # However, we must not let any *other* threads in!
72
+ _tls.is_initializing = True
73
+
74
+ for calls in _lazy_seed_tracker.get_calls():
75
+ if calls:
76
+ _queued_calls.append(calls)
77
+
78
+ try:
79
+ for queued_call, orig_traceback in _queued_calls:
80
+ try:
81
+ queued_call()
82
+ except Exception as e:
83
+ msg = (
84
+ f"MTIA call failed lazily at initialization with error: {str(e)}\n\n"
85
+ f"MTIA call was originally invoked at:\n\n{''.join(orig_traceback)}"
86
+ )
87
+ raise DeferredMtiaCallError(msg) from e
88
+ finally:
89
+ delattr(_tls, "is_initializing")
90
+ _initialized = True
91
+
92
+
93
+ class DeferredMtiaCallError(Exception):
94
+ pass
95
+
96
+
97
+ def _is_compiled() -> bool:
98
+ r"""Return true if compiled with MTIA support."""
99
+ return torch._C._mtia_isBuilt()
100
+
101
+
102
+ def is_available() -> bool:
103
+ r"""Return true if MTIA device is available"""
104
+ if not _is_compiled():
105
+ return False
106
+ # MTIA has to init devices first to know if there are any devices available.
107
+ return device_count() > 0
108
+
109
+
110
+ def synchronize() -> None:
111
+ r"""Waits for all jobs in all streams on a MTIA device to complete."""
112
+ return torch._C._mtia_deviceSynchronize()
113
+
114
+
115
+ def device_count() -> int:
116
+ r"""Return the number of MTIA devices available."""
117
+ return torch._C._accelerator_hooks_device_count()
118
+
119
+
120
+ def current_device() -> int:
121
+ r"""Return the index of a currently selected device."""
122
+ return torch._C._accelerator_hooks_get_current_device()
123
+
124
+
125
+ def current_stream(device: Optional[_device_t] = None) -> Stream:
126
+ r"""Return the currently selected :class:`Stream` for a given device.
127
+
128
+ Args:
129
+ device (torch.device or int, optional): selected device. Returns
130
+ the currently selected :class:`Stream` for the current device, given
131
+ by :func:`~torch.mtia.current_device`, if :attr:`device` is ``None``
132
+ (default).
133
+ """
134
+ return torch._C._mtia_getCurrentStream(_get_device_index(device, optional=True))
135
+
136
+
137
+ def default_stream(device: Optional[_device_t] = None) -> Stream:
138
+ r"""Return the default :class:`Stream` for a given device.
139
+
140
+ Args:
141
+ device (torch.device or int, optional): selected device. Returns
142
+ the default :class:`Stream` for the current device, given by
143
+ :func:`~torch.mtia.current_device`, if :attr:`device` is ``None``
144
+ (default).
145
+ """
146
+ return torch._C._mtia_getDefaultStream(_get_device_index(device, optional=True))
147
+
148
+
149
+ def set_stream(stream: Stream):
150
+ r"""Set the current stream.This is a wrapper API to set the stream.
151
+ Usage of this function is discouraged in favor of the ``stream``
152
+ context manager.
153
+
154
+ Args:
155
+ stream (Stream): selected stream. This function is a no-op
156
+ if this argument is ``None``.
157
+ """
158
+ if stream is None:
159
+ return
160
+ torch._C._mtia_setCurrentStream(stream)
161
+
162
+
163
+ class device:
164
+ r"""Context-manager that changes the selected device.
165
+
166
+ Args:
167
+ device (torch.device or int): device index to select. It's a no-op if
168
+ this argument is a negative integer or ``None``.
169
+ """
170
+
171
+ def __init__(self, device: Any):
172
+ self.idx = _get_device_index(device, optional=True)
173
+ self.prev_idx = -1
174
+
175
+ def __enter__(self):
176
+ self.prev_idx = torch._C._accelerator_hooks_maybe_exchange_device(self.idx)
177
+
178
+ def __exit__(self, type: Any, value: Any, traceback: Any):
179
+ self.idx = torch._C._accelerator_hooks_maybe_exchange_device(self.prev_idx)
180
+ return False
181
+
182
+
183
+ class StreamContext:
184
+ r"""Context-manager that selects a given stream.
185
+
186
+ All MTIA kernels queued within its context will be enqueued on a selected
187
+ stream.
188
+
189
+ Args:
190
+ stream (Stream): selected stream. This manager is a no-op if it's
191
+ ``None``.
192
+ .. note:: Streams are per-device.
193
+ """
194
+
195
+ cur_stream: Optional["torch.mtia.Stream"]
196
+
197
+ def __init__(self, stream: Optional["torch.mtia.Stream"]):
198
+ self.stream = stream
199
+ self.idx = _get_device_index(None, True)
200
+ if not torch.jit.is_scripting():
201
+ if self.idx is None:
202
+ self.idx = -1
203
+
204
+ self.src_prev_stream = (
205
+ None if not torch.jit.is_scripting() else torch.mtia.default_stream(None)
206
+ )
207
+ self.dst_prev_stream = (
208
+ None if not torch.jit.is_scripting() else torch.mtia.default_stream(None)
209
+ )
210
+
211
+ def __enter__(self):
212
+ # Local cur_stream variable for type refinement
213
+ cur_stream = self.stream
214
+ # Return if stream is None or MTIA device not available
215
+ if cur_stream is None or self.idx == -1:
216
+ return
217
+ self.src_prev_stream = torch.mtia.current_stream(None)
218
+
219
+ # If the stream is not on the current device, then
220
+ # set the current stream on the device
221
+ if self.src_prev_stream.device != cur_stream.device:
222
+ with device(cur_stream.device):
223
+ self.dst_prev_stream = torch.mtia.current_stream(cur_stream.device)
224
+ torch.mtia.set_stream(cur_stream)
225
+
226
+ def __exit__(self, type: Any, value: Any, traceback: Any):
227
+ # Local cur_stream variable for type refinement
228
+ cur_stream = self.stream
229
+ # If stream is None or no MTIA device available, return
230
+ if cur_stream is None or self.idx == -1:
231
+ return
232
+
233
+ # Reset the stream on the original device
234
+ # and destination device
235
+ if self.src_prev_stream.device != cur_stream.device: # type: ignore[union-attr]
236
+ torch.mtia.set_stream(self.dst_prev_stream) # type: ignore[arg-type]
237
+ torch.mtia.set_stream(self.src_prev_stream) # type: ignore[arg-type]
238
+
239
+
240
+ def stream(stream: Optional["torch.mtia.Stream"]) -> StreamContext:
241
+ r"""Wrap around the Context-manager StreamContext that selects a given stream.
242
+
243
+ Arguments:
244
+ stream (Stream): selected stream. This manager is a no-op if it's
245
+ ``None``.
246
+ .. note:: In eager mode, stream is of the Stream class; torch.mtia.stream is not supported in JIT.
247
+ """
248
+ return StreamContext(stream)
249
+
250
+
251
+ __all__ = [
252
+ "init",
253
+ "is_available",
254
+ "is_initialized",
255
+ "synchronize",
256
+ "device_count",
257
+ "current_device",
258
+ "current_stream",
259
+ "default_stream",
260
+ "set_stream",
261
+ "stream",
262
+ "device",
263
+ ]
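For orientation, a minimal usage sketch of the device and stream helpers above, assuming a hypothetical build with MTIA support and at least one device; only APIs defined in this module are used:

    import torch
    import torch.mtia

    if torch.mtia.is_available():
        with torch.mtia.device(0):
            s = torch.mtia.current_stream()  # current Stream of device 0
            with torch.mtia.stream(s):
                # Kernels queued here are enqueued on stream s.
                pass
            torch.mtia.synchronize()  # wait for all streams on the device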
parrot/lib/python3.10/site-packages/torch/mtia/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (7.82 kB).
 
parrot/lib/python3.10/site-packages/torch/mtia/__pycache__/_utils.cpython-310.pyc ADDED
Binary file (1.52 kB).