nnilayy commited on
Commit
c4f6b3e
·
verified ·
1 Parent(s): 4fe9b55

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +2 -0
  2. lib/python3.10/site-packages/av/audio/layout.cpython-310-x86_64-linux-gnu.so +3 -0
  3. lib/python3.10/site-packages/av/audio/plane.cpython-310-x86_64-linux-gnu.so +3 -0
  4. lib/python3.10/site-packages/numba/cloudpickle/__init__.py +18 -0
  5. lib/python3.10/site-packages/numba/cloudpickle/cloudpickle.py +1504 -0
  6. lib/python3.10/site-packages/numba/cloudpickle/cloudpickle_fast.py +13 -0
  7. lib/python3.10/site-packages/numba/core/annotations/__init__.py +0 -0
  8. lib/python3.10/site-packages/numba/core/annotations/pretty_annotate.py +283 -0
  9. lib/python3.10/site-packages/numba/core/annotations/template.html +144 -0
  10. lib/python3.10/site-packages/numba/core/annotations/type_annotations.py +283 -0
  11. lib/python3.10/site-packages/numba/core/datamodel/__init__.py +4 -0
  12. lib/python3.10/site-packages/numba/core/datamodel/manager.py +68 -0
  13. lib/python3.10/site-packages/numba/core/datamodel/models.py +12 -0
  14. lib/python3.10/site-packages/numba/core/datamodel/new_models.py +1390 -0
  15. lib/python3.10/site-packages/numba/core/datamodel/old_models.py +1385 -0
  16. lib/python3.10/site-packages/numba/core/datamodel/packer.py +213 -0
  17. lib/python3.10/site-packages/numba/core/datamodel/registry.py +18 -0
  18. lib/python3.10/site-packages/numba/core/datamodel/testing.py +150 -0
  19. lib/python3.10/site-packages/numba/core/rewrites/__init__.py +8 -0
  20. lib/python3.10/site-packages/numba/core/rewrites/ir_print.py +82 -0
  21. lib/python3.10/site-packages/numba/core/rewrites/registry.py +98 -0
  22. lib/python3.10/site-packages/numba/core/rewrites/static_binop.py +35 -0
  23. lib/python3.10/site-packages/numba/core/rewrites/static_getitem.py +175 -0
  24. lib/python3.10/site-packages/numba/core/rewrites/static_raise.py +93 -0
  25. lib/python3.10/site-packages/numba/core/types/__init__.py +386 -0
  26. lib/python3.10/site-packages/numba/core/types/abstract.py +512 -0
  27. lib/python3.10/site-packages/numba/core/types/common.py +104 -0
  28. lib/python3.10/site-packages/numba/core/types/containers.py +974 -0
  29. lib/python3.10/site-packages/numba/core/types/function_type.py +211 -0
  30. lib/python3.10/site-packages/numba/core/types/functions.py +743 -0
  31. lib/python3.10/site-packages/numba/core/types/iterators.py +108 -0
  32. lib/python3.10/site-packages/numba/core/types/misc.py +556 -0
  33. lib/python3.10/site-packages/numba/core/types/new_scalars/__init__.py +18 -0
  34. lib/python3.10/site-packages/numba/core/types/new_scalars/machine_types.py +119 -0
  35. lib/python3.10/site-packages/numba/core/types/new_scalars/numpy_types.py +142 -0
  36. lib/python3.10/site-packages/numba/core/types/new_scalars/python_types.py +130 -0
  37. lib/python3.10/site-packages/numba/core/types/new_scalars/scalars.py +161 -0
  38. lib/python3.10/site-packages/numba/core/types/npytypes.py +649 -0
  39. lib/python3.10/site-packages/numba/core/types/old_scalars.py +270 -0
  40. lib/python3.10/site-packages/numba/core/types/scalars.py +12 -0
  41. lib/python3.10/site-packages/numba/core/typing/__init__.py +3 -0
  42. lib/python3.10/site-packages/numba/core/typing/arraydecl.py +880 -0
  43. lib/python3.10/site-packages/numba/core/typing/bufproto.py +79 -0
  44. lib/python3.10/site-packages/numba/core/typing/context.py +741 -0
  45. lib/python3.10/site-packages/numba/core/typing/mathdecl.py +14 -0
  46. lib/python3.10/site-packages/numba/core/typing/new_cmathdecl.py +50 -0
  47. lib/python3.10/site-packages/numba/core/typing/new_mathdecl.py +107 -0
  48. lib/python3.10/site-packages/numba/core/typing/templates.py +1337 -0
  49. lib/python3.10/site-packages/numba/core/unsafe/__init__.py +0 -0
  50. lib/python3.10/site-packages/numba/core/unsafe/bytes.py +49 -0
.gitattributes CHANGED
@@ -97,3 +97,5 @@ lib/python3.10/site-packages/av/video/frame.cpython-310-x86_64-linux-gnu.so filt
97
  lib/python3.10/site-packages/av/sidedata/sidedata.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
98
  lib/python3.10/site-packages/av/video/format.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
99
  lib/python3.10/site-packages/av/sidedata/motionvectors.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 
 
 
97
  lib/python3.10/site-packages/av/sidedata/sidedata.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
98
  lib/python3.10/site-packages/av/video/format.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
99
  lib/python3.10/site-packages/av/sidedata/motionvectors.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
100
+ lib/python3.10/site-packages/av/audio/layout.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
101
+ lib/python3.10/site-packages/av/audio/plane.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
lib/python3.10/site-packages/av/audio/layout.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:18d6c3947b0f0a21387952830fd743a2a2a8c1877783518ff541d87360753bae
3
+ size 429649
lib/python3.10/site-packages/av/audio/plane.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ce1e8f5615289e7572880313c889c7d54b8930d6373886ef2d1b04150d78f85a
3
+ size 363449
lib/python3.10/site-packages/numba/cloudpickle/__init__.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from . import cloudpickle
2
+ from .cloudpickle import * # noqa
3
+
4
+ __doc__ = cloudpickle.__doc__
5
+
6
+ __version__ = "3.0.0"
7
+
8
+ __all__ = [ # noqa
9
+ "__version__",
10
+ "Pickler",
11
+ "CloudPickler",
12
+ "dumps",
13
+ "loads",
14
+ "dump",
15
+ "load",
16
+ "register_pickle_by_value",
17
+ "unregister_pickle_by_value",
18
+ ]
lib/python3.10/site-packages/numba/cloudpickle/cloudpickle.py ADDED
@@ -0,0 +1,1504 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This is a modified version of the cloudpickle module.
3
+ Patches:
4
+ - https://github.com/numba/numba/pull/7388
5
+ Avoid resetting class state of dynamic classes.
6
+
7
+ Original module docstring:
8
+
9
+ Pickler class to extend the standard pickle.Pickler functionality
10
+
11
+ The main objective is to make it natural to perform distributed computing on
12
+ clusters (such as PySpark, Dask, Ray...) with interactively defined code
13
+ (functions, classes, ...) written in notebooks or console.
14
+
15
+ In particular this pickler adds the following features:
16
+ - serialize interactively-defined or locally-defined functions, classes,
17
+ enums, typevars, lambdas and nested functions to compiled byte code;
18
+ - deal with some other non-serializable objects in an ad-hoc manner where
19
+ applicable.
20
+
21
+ This pickler is therefore meant to be used for the communication between short
22
+ lived Python processes running the same version of Python and libraries. In
23
+ particular, it is not meant to be used for long term storage of Python objects.
24
+
25
+ It does not include an unpickler, as standard Python unpickling suffices.
26
+
27
+ This module was extracted from the `cloud` package, developed by `PiCloud, Inc.
28
+ <https://web.archive.org/web/20140626004012/http://www.picloud.com/>`_.
29
+
30
+ Copyright (c) 2012-now, CloudPickle developers and contributors.
31
+ Copyright (c) 2012, Regents of the University of California.
32
+ Copyright (c) 2009 `PiCloud, Inc. <https://web.archive.org/web/20140626004012/http://www.picloud.com/>`_.
33
+ All rights reserved.
34
+
35
+ Redistribution and use in source and binary forms, with or without
36
+ modification, are permitted provided that the following conditions
37
+ are met:
38
+ * Redistributions of source code must retain the above copyright
39
+ notice, this list of conditions and the following disclaimer.
40
+ * Redistributions in binary form must reproduce the above copyright
41
+ notice, this list of conditions and the following disclaimer in the
42
+ documentation and/or other materials provided with the distribution.
43
+ * Neither the name of the University of California, Berkeley nor the
44
+ names of its contributors may be used to endorse or promote
45
+ products derived from this software without specific prior written
46
+ permission.
47
+
48
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
49
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
50
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
51
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
52
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
53
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
54
+ TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
55
+ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
56
+ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
57
+ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
58
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
59
+ """
60
+
61
+ import _collections_abc
62
+ from collections import ChainMap, OrderedDict
63
+ import abc
64
+ import builtins
65
+ import copyreg
66
+ import dataclasses
67
+ import dis
68
+ from enum import Enum
69
+ import io
70
+ import itertools
71
+ import logging
72
+ import opcode
73
+ import pickle
74
+ from pickle import _getattribute
75
+ import platform
76
+ import struct
77
+ import sys
78
+ import threading
79
+ import types
80
+ import typing
81
+ import uuid
82
+ import warnings
83
+ import weakref
84
+
85
+ # The following import is required to be imported in the cloudpickle
86
+ # namespace to be able to load pickle files generated with older versions of
87
+ # cloudpickle. See: tests/test_backward_compat.py
88
+ from types import CellType # noqa: F401
89
+
90
+
91
+ # cloudpickle is meant for inter process communication: we expect all
92
+ # communicating processes to run the same Python version hence we favor
93
+ # communication speed over compatibility:
94
+ DEFAULT_PROTOCOL = pickle.HIGHEST_PROTOCOL
95
+
96
+ # Names of modules whose resources should be treated as dynamic.
97
+ _PICKLE_BY_VALUE_MODULES = set()
98
+
99
+ # Track the provenance of reconstructed dynamic classes to make it possible to
100
+ # reconstruct instances from the matching singleton class definition when
101
+ # appropriate and preserve the usual "isinstance" semantics of Python objects.
102
+ _DYNAMIC_CLASS_TRACKER_BY_CLASS = weakref.WeakKeyDictionary()
103
+ _DYNAMIC_CLASS_TRACKER_BY_ID = weakref.WeakValueDictionary()
104
+ _DYNAMIC_CLASS_TRACKER_LOCK = threading.Lock()
105
+ _DYNAMIC_CLASS_TRACKER_REUSING = weakref.WeakSet()
106
+
107
+ PYPY = platform.python_implementation() == "PyPy"
108
+
109
+ builtin_code_type = None
110
+ if PYPY:
111
+ # builtin-code objects only exist in pypy
112
+ builtin_code_type = type(float.__new__.__code__)
113
+
114
+ _extract_code_globals_cache = weakref.WeakKeyDictionary()
115
+
116
+
117
+ def _get_or_create_tracker_id(class_def):
118
+ with _DYNAMIC_CLASS_TRACKER_LOCK:
119
+ class_tracker_id = _DYNAMIC_CLASS_TRACKER_BY_CLASS.get(class_def)
120
+ if class_tracker_id is None:
121
+ class_tracker_id = uuid.uuid4().hex
122
+ _DYNAMIC_CLASS_TRACKER_BY_CLASS[class_def] = class_tracker_id
123
+ _DYNAMIC_CLASS_TRACKER_BY_ID[class_tracker_id] = class_def
124
+ return class_tracker_id
125
+
126
+
127
+ def _lookup_class_or_track(class_tracker_id, class_def):
128
+ if class_tracker_id is not None:
129
+ with _DYNAMIC_CLASS_TRACKER_LOCK:
130
+ orig_class_def = class_def
131
+ class_def = _DYNAMIC_CLASS_TRACKER_BY_ID.setdefault(
132
+ class_tracker_id, class_def
133
+ )
134
+ _DYNAMIC_CLASS_TRACKER_BY_CLASS[class_def] = class_tracker_id
135
+ # Check if we are reusing a previous class_def
136
+ if orig_class_def is not class_def:
137
+ # Remember the class_def is being reused
138
+ _DYNAMIC_CLASS_TRACKER_REUSING.add(class_def)
139
+ return class_def
140
+
141
+
142
+ def register_pickle_by_value(module):
143
+ """Register a module to make it functions and classes picklable by value.
144
+
145
+ By default, functions and classes that are attributes of an importable
146
+ module are to be pickled by reference, that is relying on re-importing
147
+ the attribute from the module at load time.
148
+
149
+ If `register_pickle_by_value(module)` is called, all its functions and
150
+ classes are subsequently to be pickled by value, meaning that they can
151
+ be loaded in Python processes where the module is not importable.
152
+
153
+ This is especially useful when developing a module in a distributed
154
+ execution environment: restarting the client Python process with the new
155
+ source code is enough: there is no need to re-install the new version
156
+ of the module on all the worker nodes nor to restart the workers.
157
+
158
+ Note: this feature is considered experimental. See the cloudpickle
159
+ README.md file for more details and limitations.
160
+ """
161
+ if not isinstance(module, types.ModuleType):
162
+ raise ValueError(f"Input should be a module object, got {str(module)} instead")
163
+ # In the future, cloudpickle may need a way to access any module registered
164
+ # for pickling by value in order to introspect relative imports inside
165
+ # functions pickled by value. (see
166
+ # https://github.com/cloudpipe/cloudpickle/pull/417#issuecomment-873684633).
167
+ # This access can be ensured by checking that module is present in
168
+ # sys.modules at registering time and assuming that it will still be in
169
+ # there when accessed during pickling. Another alternative would be to
170
+ # store a weakref to the module. Even though cloudpickle does not implement
171
+ # this introspection yet, in order to avoid a possible breaking change
172
+ # later, we still enforce the presence of module inside sys.modules.
173
+ if module.__name__ not in sys.modules:
174
+ raise ValueError(
175
+ f"{module} was not imported correctly, have you used an "
176
+ "`import` statement to access it?"
177
+ )
178
+ _PICKLE_BY_VALUE_MODULES.add(module.__name__)
179
+
180
+
181
+ def unregister_pickle_by_value(module):
182
+ """Unregister that the input module should be pickled by value."""
183
+ if not isinstance(module, types.ModuleType):
184
+ raise ValueError(f"Input should be a module object, got {str(module)} instead")
185
+ if module.__name__ not in _PICKLE_BY_VALUE_MODULES:
186
+ raise ValueError(f"{module} is not registered for pickle by value")
187
+ else:
188
+ _PICKLE_BY_VALUE_MODULES.remove(module.__name__)
189
+
190
+
191
+ def list_registry_pickle_by_value():
192
+ return _PICKLE_BY_VALUE_MODULES.copy()
193
+
194
+
195
+ def _is_registered_pickle_by_value(module):
196
+ module_name = module.__name__
197
+ if module_name in _PICKLE_BY_VALUE_MODULES:
198
+ return True
199
+ while True:
200
+ parent_name = module_name.rsplit(".", 1)[0]
201
+ if parent_name == module_name:
202
+ break
203
+ if parent_name in _PICKLE_BY_VALUE_MODULES:
204
+ return True
205
+ module_name = parent_name
206
+ return False
207
+
208
+
209
+ def _whichmodule(obj, name):
210
+ """Find the module an object belongs to.
211
+
212
+ This function differs from ``pickle.whichmodule`` in two ways:
213
+ - it does not mangle the cases where obj's module is __main__ and obj was
214
+ not found in any module.
215
+ - Errors arising during module introspection are ignored, as those errors
216
+ are considered unwanted side effects.
217
+ """
218
+ module_name = getattr(obj, "__module__", None)
219
+
220
+ if module_name is not None:
221
+ return module_name
222
+ # Protect the iteration by using a copy of sys.modules against dynamic
223
+ # modules that trigger imports of other modules upon calls to getattr or
224
+ # other threads importing at the same time.
225
+ for module_name, module in sys.modules.copy().items():
226
+ # Some modules such as coverage can inject non-module objects inside
227
+ # sys.modules
228
+ if (
229
+ module_name == "__main__"
230
+ or module is None
231
+ or not isinstance(module, types.ModuleType)
232
+ ):
233
+ continue
234
+ try:
235
+ if _getattribute(module, name)[0] is obj:
236
+ return module_name
237
+ except Exception:
238
+ pass
239
+ return None
240
+
241
+
242
+ def _should_pickle_by_reference(obj, name=None):
243
+ """Test whether an function or a class should be pickled by reference
244
+
245
+ Pickling by reference means by that the object (typically a function or a
246
+ class) is an attribute of a module that is assumed to be importable in the
247
+ target Python environment. Loading will therefore rely on importing the
248
+ module and then calling `getattr` on it to access the function or class.
249
+
250
+ Pickling by reference is the only option to pickle functions and classes
251
+ in the standard library. In cloudpickle the alternative option is to
252
+ pickle by value (for instance for interactively or locally defined
253
+ functions and classes or for attributes of modules that have been
254
+ explicitly registered to be pickled by value.
255
+ """
256
+ if isinstance(obj, types.FunctionType) or issubclass(type(obj), type):
257
+ module_and_name = _lookup_module_and_qualname(obj, name=name)
258
+ if module_and_name is None:
259
+ return False
260
+ module, name = module_and_name
261
+ return not _is_registered_pickle_by_value(module)
262
+
263
+ elif isinstance(obj, types.ModuleType):
264
+ # We assume that sys.modules is primarily used as a cache mechanism for
265
+ # the Python import machinery. Checking if a module has been added in
266
+ # is sys.modules therefore a cheap and simple heuristic to tell us
267
+ # whether we can assume that a given module could be imported by name
268
+ # in another Python process.
269
+ if _is_registered_pickle_by_value(obj):
270
+ return False
271
+ return obj.__name__ in sys.modules
272
+ else:
273
+ raise TypeError(
274
+ "cannot check importability of {} instances".format(type(obj).__name__)
275
+ )
276
+
277
+
278
+ def _lookup_module_and_qualname(obj, name=None):
279
+ if name is None:
280
+ name = getattr(obj, "__qualname__", None)
281
+ if name is None: # pragma: no cover
282
+ # This used to be needed for Python 2.7 support but is probably not
283
+ # needed anymore. However we keep the __name__ introspection in case
284
+ # users of cloudpickle rely on this old behavior for unknown reasons.
285
+ name = getattr(obj, "__name__", None)
286
+
287
+ module_name = _whichmodule(obj, name)
288
+
289
+ if module_name is None:
290
+ # In this case, obj.__module__ is None AND obj was not found in any
291
+ # imported module. obj is thus treated as dynamic.
292
+ return None
293
+
294
+ if module_name == "__main__":
295
+ return None
296
+
297
+ # Note: if module_name is in sys.modules, the corresponding module is
298
+ # assumed importable at unpickling time. See #357
299
+ module = sys.modules.get(module_name, None)
300
+ if module is None:
301
+ # The main reason why obj's module would not be imported is that this
302
+ # module has been dynamically created, using for example
303
+ # types.ModuleType. The other possibility is that module was removed
304
+ # from sys.modules after obj was created/imported. But this case is not
305
+ # supported, as the standard pickle does not support it either.
306
+ return None
307
+
308
+ try:
309
+ obj2, parent = _getattribute(module, name)
310
+ except AttributeError:
311
+ # obj was not found inside the module it points to
312
+ return None
313
+ if obj2 is not obj:
314
+ return None
315
+ return module, name
316
+
317
+
318
+ def _extract_code_globals(co):
319
+ """Find all globals names read or written to by codeblock co."""
320
+ out_names = _extract_code_globals_cache.get(co)
321
+ if out_names is None:
322
+ # We use a dict with None values instead of a set to get a
323
+ # deterministic order and avoid introducing non-deterministic pickle
324
+ # bytes as a results.
325
+ out_names = {name: None for name in _walk_global_ops(co)}
326
+
327
+ # Declaring a function inside another one using the "def ..." syntax
328
+ # generates a constant code object corresponding to the one of the
329
+ # nested function's As the nested function may itself need global
330
+ # variables, we need to introspect its code, extract its globals, (look
331
+ # for code object in it's co_consts attribute..) and add the result to
332
+ # code_globals
333
+ if co.co_consts:
334
+ for const in co.co_consts:
335
+ if isinstance(const, types.CodeType):
336
+ out_names.update(_extract_code_globals(const))
337
+
338
+ _extract_code_globals_cache[co] = out_names
339
+
340
+ return out_names
341
+
342
+
343
+ def _find_imported_submodules(code, top_level_dependencies):
344
+ """Find currently imported submodules used by a function.
345
+
346
+ Submodules used by a function need to be detected and referenced for the
347
+ function to work correctly at depickling time. Because submodules can be
348
+ referenced as attribute of their parent package (``package.submodule``), we
349
+ need a special introspection technique that does not rely on GLOBAL-related
350
+ opcodes to find references of them in a code object.
351
+
352
+ Example:
353
+ ```
354
+ import concurrent.futures
355
+ import cloudpickle
356
+ def func():
357
+ x = concurrent.futures.ThreadPoolExecutor
358
+ if __name__ == '__main__':
359
+ cloudpickle.dumps(func)
360
+ ```
361
+ The globals extracted by cloudpickle in the function's state include the
362
+ concurrent package, but not its submodule (here, concurrent.futures), which
363
+ is the module used by func. Find_imported_submodules will detect the usage
364
+ of concurrent.futures. Saving this module alongside with func will ensure
365
+ that calling func once depickled does not fail due to concurrent.futures
366
+ not being imported
367
+ """
368
+
369
+ subimports = []
370
+ # check if any known dependency is an imported package
371
+ for x in top_level_dependencies:
372
+ if (
373
+ isinstance(x, types.ModuleType)
374
+ and hasattr(x, "__package__")
375
+ and x.__package__
376
+ ):
377
+ # check if the package has any currently loaded sub-imports
378
+ prefix = x.__name__ + "."
379
+ # A concurrent thread could mutate sys.modules,
380
+ # make sure we iterate over a copy to avoid exceptions
381
+ for name in list(sys.modules):
382
+ # Older versions of pytest will add a "None" module to
383
+ # sys.modules.
384
+ if name is not None and name.startswith(prefix):
385
+ # check whether the function can address the sub-module
386
+ tokens = set(name[len(prefix) :].split("."))
387
+ if not tokens - set(code.co_names):
388
+ subimports.append(sys.modules[name])
389
+ return subimports
390
+
391
+
392
+ # relevant opcodes
393
+ STORE_GLOBAL = opcode.opmap["STORE_GLOBAL"]
394
+ DELETE_GLOBAL = opcode.opmap["DELETE_GLOBAL"]
395
+ LOAD_GLOBAL = opcode.opmap["LOAD_GLOBAL"]
396
+ GLOBAL_OPS = (STORE_GLOBAL, DELETE_GLOBAL, LOAD_GLOBAL)
397
+ HAVE_ARGUMENT = dis.HAVE_ARGUMENT
398
+ EXTENDED_ARG = dis.EXTENDED_ARG
399
+
400
+
401
+ _BUILTIN_TYPE_NAMES = {}
402
+ for k, v in types.__dict__.items():
403
+ if type(v) is type:
404
+ _BUILTIN_TYPE_NAMES[v] = k
405
+
406
+
407
+ def _builtin_type(name):
408
+ if name == "ClassType": # pragma: no cover
409
+ # Backward compat to load pickle files generated with cloudpickle
410
+ # < 1.3 even if loading pickle files from older versions is not
411
+ # officially supported.
412
+ return type
413
+ return getattr(types, name)
414
+
415
+
416
+ def _walk_global_ops(code):
417
+ """Yield referenced name for global-referencing instructions in code."""
418
+ for instr in dis.get_instructions(code):
419
+ op = instr.opcode
420
+ if op in GLOBAL_OPS:
421
+ yield instr.argval
422
+
423
+
424
+ def _extract_class_dict(cls):
425
+ """Retrieve a copy of the dict of a class without the inherited method."""
426
+ clsdict = dict(cls.__dict__) # copy dict proxy to a dict
427
+ if len(cls.__bases__) == 1:
428
+ inherited_dict = cls.__bases__[0].__dict__
429
+ else:
430
+ inherited_dict = {}
431
+ for base in reversed(cls.__bases__):
432
+ inherited_dict.update(base.__dict__)
433
+ to_remove = []
434
+ for name, value in clsdict.items():
435
+ try:
436
+ base_value = inherited_dict[name]
437
+ if value is base_value:
438
+ to_remove.append(name)
439
+ except KeyError:
440
+ pass
441
+ for name in to_remove:
442
+ clsdict.pop(name)
443
+ return clsdict
444
+
445
+
446
+ def is_tornado_coroutine(func):
447
+ """Return whether `func` is a Tornado coroutine function.
448
+
449
+ Running coroutines are not supported.
450
+ """
451
+ warnings.warn(
452
+ "is_tornado_coroutine is deprecated in cloudpickle 3.0 and will be "
453
+ "removed in cloudpickle 4.0. Use tornado.gen.is_coroutine_function "
454
+ "directly instead.",
455
+ category=DeprecationWarning,
456
+ )
457
+ if "tornado.gen" not in sys.modules:
458
+ return False
459
+ gen = sys.modules["tornado.gen"]
460
+ if not hasattr(gen, "is_coroutine_function"):
461
+ # Tornado version is too old
462
+ return False
463
+ return gen.is_coroutine_function(func)
464
+
465
+
466
+ def subimport(name):
467
+ # We cannot do simply: `return __import__(name)`: Indeed, if ``name`` is
468
+ # the name of a submodule, __import__ will return the top-level root module
469
+ # of this submodule. For instance, __import__('os.path') returns the `os`
470
+ # module.
471
+ __import__(name)
472
+ return sys.modules[name]
473
+
474
+
475
+ def dynamic_subimport(name, vars):
476
+ mod = types.ModuleType(name)
477
+ mod.__dict__.update(vars)
478
+ mod.__dict__["__builtins__"] = builtins.__dict__
479
+ return mod
480
+
481
+
482
+ def _get_cell_contents(cell):
483
+ try:
484
+ return cell.cell_contents
485
+ except ValueError:
486
+ # Handle empty cells explicitly with a sentinel value.
487
+ return _empty_cell_value
488
+
489
+
490
+ def instance(cls):
491
+ """Create a new instance of a class.
492
+
493
+ Parameters
494
+ ----------
495
+ cls : type
496
+ The class to create an instance of.
497
+
498
+ Returns
499
+ -------
500
+ instance : cls
501
+ A new instance of ``cls``.
502
+ """
503
+ return cls()
504
+
505
+
506
+ @instance
507
+ class _empty_cell_value:
508
+ """Sentinel for empty closures."""
509
+
510
+ @classmethod
511
+ def __reduce__(cls):
512
+ return cls.__name__
513
+
514
+
515
+ def _make_function(code, globals, name, argdefs, closure):
516
+ # Setting __builtins__ in globals is needed for nogil CPython.
517
+ globals["__builtins__"] = __builtins__
518
+ return types.FunctionType(code, globals, name, argdefs, closure)
519
+
520
+
521
+ def _make_empty_cell():
522
+ if False:
523
+ # trick the compiler into creating an empty cell in our lambda
524
+ cell = None
525
+ raise AssertionError("this route should not be executed")
526
+
527
+ return (lambda: cell).__closure__[0]
528
+
529
+
530
+ def _make_cell(value=_empty_cell_value):
531
+ cell = _make_empty_cell()
532
+ if value is not _empty_cell_value:
533
+ cell.cell_contents = value
534
+ return cell
535
+
536
+
537
+ def _make_skeleton_class(
538
+ type_constructor, name, bases, type_kwargs, class_tracker_id, extra
539
+ ):
540
+ """Build dynamic class with an empty __dict__ to be filled once memoized
541
+
542
+ If class_tracker_id is not None, try to lookup an existing class definition
543
+ matching that id. If none is found, track a newly reconstructed class
544
+ definition under that id so that other instances stemming from the same
545
+ class id will also reuse this class definition.
546
+
547
+ The "extra" variable is meant to be a dict (or None) that can be used for
548
+ forward compatibility shall the need arise.
549
+ """
550
+ skeleton_class = types.new_class(
551
+ name, bases, {"metaclass": type_constructor}, lambda ns: ns.update(type_kwargs)
552
+ )
553
+ return _lookup_class_or_track(class_tracker_id, skeleton_class)
554
+
555
+
556
def _make_skeleton_enum(
    bases, name, qualname, members, module, class_tracker_id, extra
):
    """Build dynamic enum with an empty __dict__ to be filled once memoized

    The creation of the enum class is inspired by the code of
    EnumMeta._create_.

    If class_tracker_id is not None, try to lookup an existing enum definition
    matching that id. If none is found, track a newly reconstructed enum
    definition under that id so that other instances stemming from the same
    class id will also reuse this enum definition.

    The "extra" variable is meant to be a dict (or None) that can be used for
    forward compatibility shall the need arise.
    """
    # enums always inherit from their base Enum class at the last position in
    # the list of base classes:
    enum_base = bases[-1]
    metacls = enum_base.__class__
    # __prepare__ returns the special _EnumDict that records member order and
    # validates member names, so members must be inserted through it.
    classdict = metacls.__prepare__(name, bases)

    for member_name, member_value in members.items():
        classdict[member_name] = member_value
    enum_class = metacls.__new__(metacls, name, bases, classdict)
    enum_class.__module__ = module
    enum_class.__qualname__ = qualname

    return _lookup_class_or_track(class_tracker_id, enum_class)
585
+
586
+
587
def _make_typevar(name, bound, constraints, covariant, contravariant, class_tracker_id):
    """Reconstruct a typing.TypeVar and deduplicate it via the class tracker."""
    tv = typing.TypeVar(
        name,
        *constraints,
        bound=bound,
        covariant=covariant,
        contravariant=contravariant,
    )
    # Reuse a previously reconstructed TypeVar with the same tracker id so
    # identity-based comparisons keep working after unpickling.
    return _lookup_class_or_track(class_tracker_id, tv)
596
+
597
+
598
def _decompose_typevar(obj):
    """Return the argument tuple needed by _make_typevar to rebuild *obj*."""
    return (
        obj.__name__,
        obj.__bound__,
        obj.__constraints__,
        obj.__covariant__,
        obj.__contravariant__,
        _get_or_create_tracker_id(obj),
    )
607
+
608
+
609
def _typevar_reduce(obj):
    """Reducer for typing.TypeVar instances.

    Pickles by value when the TypeVar cannot be found by module/qualname
    lookup, or when its module was registered for pickling by value;
    otherwise pickles by reference via ``getattr(module, name)``.
    """
    # TypeVar instances require the module information hence why we
    # are not using the _should_pickle_by_reference directly
    module_and_name = _lookup_module_and_qualname(obj, name=obj.__name__)

    # Both "not importable" and "registered for pickle-by-value" take the
    # same by-value path; fold the previously duplicated branches into one.
    if module_and_name is None or _is_registered_pickle_by_value(
        module_and_name[0]
    ):
        return (_make_typevar, _decompose_typevar(obj))

    return (getattr, module_and_name)
620
+
621
+
622
+ def _get_bases(typ):
623
+ if "__orig_bases__" in getattr(typ, "__dict__", {}):
624
+ # For generic types (see PEP 560)
625
+ # Note that simply checking `hasattr(typ, '__orig_bases__')` is not
626
+ # correct. Subclasses of a fully-parameterized generic class does not
627
+ # have `__orig_bases__` defined, but `hasattr(typ, '__orig_bases__')`
628
+ # will return True because it's defined in the base class.
629
+ bases_attr = "__orig_bases__"
630
+ else:
631
+ # For regular class objects
632
+ bases_attr = "__bases__"
633
+ return getattr(typ, bases_attr)
634
+
635
+
636
+ def _make_dict_keys(obj, is_ordered=False):
637
+ if is_ordered:
638
+ return OrderedDict.fromkeys(obj).keys()
639
+ else:
640
+ return dict.fromkeys(obj).keys()
641
+
642
+
643
+ def _make_dict_values(obj, is_ordered=False):
644
+ if is_ordered:
645
+ return OrderedDict((i, _) for i, _ in enumerate(obj)).values()
646
+ else:
647
+ return {i: _ for i, _ in enumerate(obj)}.values()
648
+
649
+
650
+ def _make_dict_items(obj, is_ordered=False):
651
+ if is_ordered:
652
+ return OrderedDict(obj).items()
653
+ else:
654
+ return obj.items()
655
+
656
+
657
+ # COLLECTION OF OBJECTS __getnewargs__-LIKE METHODS
658
+ # -------------------------------------------------
659
+
660
+
661
def _class_getnewargs(obj):
    """Return the argument tuple passed to _make_skeleton_class for *obj*."""
    type_kwargs = {}
    if "__module__" in obj.__dict__:
        type_kwargs["__module__"] = obj.__module__

    # Only forward __dict__ when it is a property object (slotted classes);
    # a plain mapping here would be the instance-dict descriptor instead.
    __dict__ = obj.__dict__.get("__dict__", None)
    if isinstance(__dict__, property):
        type_kwargs["__dict__"] = __dict__

    return (
        type(obj),          # the metaclass
        obj.__name__,
        _get_bases(obj),
        type_kwargs,
        _get_or_create_tracker_id(obj),
        None,               # "extra" slot reserved for forward compatibility
    )
678
+
679
+
680
def _enum_getnewargs(obj):
    """Return the argument tuple passed to _make_skeleton_enum for *obj*."""
    members = {e.name: e.value for e in obj}
    return (
        obj.__bases__,
        obj.__name__,
        obj.__qualname__,
        members,
        obj.__module__,
        _get_or_create_tracker_id(obj),
        None,  # "extra" slot reserved for forward compatibility
    )
691
+
692
+
693
+ # COLLECTION OF OBJECTS RECONSTRUCTORS
694
+ # ------------------------------------
695
def _file_reconstructor(retval):
    # Identity function used as a picklable reconstructor: the StringIO
    # built by _file_reduce is shipped as the sole argument and returned
    # unchanged at unpickling time.
    return retval
697
+
698
+
699
+ # COLLECTION OF OBJECTS STATE GETTERS
700
+ # -----------------------------------
701
+
702
+
703
def _function_getstate(func):
    """Return the (state, slotstate) pair used to restore a dynamic function."""
    # - Put func's dynamic attributes (stored in func.__dict__) in state. These
    #   attributes will be restored at unpickling time using
    #   f.__dict__.update(state)
    # - Put func's members into slotstate. Such attributes will be restored at
    #   unpickling time by iterating over slotstate and calling setattr(func,
    #   slotname, slotvalue)
    slotstate = {
        "__name__": func.__name__,
        "__qualname__": func.__qualname__,
        "__annotations__": func.__annotations__,
        "__kwdefaults__": func.__kwdefaults__,
        "__defaults__": func.__defaults__,
        "__module__": func.__module__,
        "__doc__": func.__doc__,
        "__closure__": func.__closure__,
    }

    # Only ship the globals actually referenced by the function's code object,
    # not the whole module namespace.
    f_globals_ref = _extract_code_globals(func.__code__)
    f_globals = {k: func.__globals__[k] for k in f_globals_ref if k in func.__globals__}

    if func.__closure__ is not None:
        closure_values = list(map(_get_cell_contents, func.__closure__))
    else:
        closure_values = ()

    # Extract currently-imported submodules used by func. Storing these modules
    # in a smoke _cloudpickle_subimports attribute of the object's state will
    # trigger the side effect of importing these modules at unpickling time
    # (which is necessary for func to work correctly once depickled)
    slotstate["_cloudpickle_submodules"] = _find_imported_submodules(
        func.__code__, itertools.chain(f_globals.values(), closure_values)
    )
    slotstate["__globals__"] = f_globals

    state = func.__dict__
    return state, slotstate
740
+
741
+
742
def _class_getstate(obj):
    """Return the (clsdict, slotstate) pair used to restore a dynamic class."""
    clsdict = _extract_class_dict(obj)
    clsdict.pop("__weakref__", None)

    if issubclass(type(obj), abc.ABCMeta):
        # If obj is an instance of an ABCMeta subclass, don't pickle the
        # cache/negative caches populated during isinstance/issubclass
        # checks, but pickle the list of registered subclasses of obj.
        clsdict.pop("_abc_cache", None)
        clsdict.pop("_abc_negative_cache", None)
        clsdict.pop("_abc_negative_cache_version", None)
        registry = clsdict.pop("_abc_registry", None)
        if registry is None:
            # The abc caches and registered subclasses of a
            # class are bundled into the single _abc_impl attribute
            clsdict.pop("_abc_impl", None)
            (registry, _, _, _) = abc._get_dump(obj)

            # registry holds weakrefs here; dereference them for pickling.
            clsdict["_abc_impl"] = [subclass_weakref() for subclass_weakref in registry]
        else:
            # In the above if clause, registry is a set of weakrefs -- in
            # this case, registry is a WeakSet
            clsdict["_abc_impl"] = [type_ for type_ in registry]

    if "__slots__" in clsdict:
        # pickle string length optimization: member descriptors of obj are
        # created automatically from obj's __slots__ attribute, no need to
        # save them in obj's state
        if isinstance(obj.__slots__, str):
            clsdict.pop(obj.__slots__)
        else:
            for k in obj.__slots__:
                clsdict.pop(k, None)

    clsdict.pop("__dict__", None)  # unpicklable property object

    return (clsdict, {})
779
+
780
+
781
def _enum_getstate(obj):
    """Return the (clsdict, slotstate) pair used to restore a dynamic enum."""
    clsdict, slotstate = _class_getstate(obj)

    members = {e.name: e.value for e in obj}
    # Cleanup the clsdict that will be passed to _make_skeleton_enum:
    # Those attributes are already handled by the metaclass.
    for attrname in [
        "_generate_next_value_",
        "_member_names_",
        "_member_map_",
        "_member_type_",
        "_value2member_map_",
    ]:
        clsdict.pop(attrname, None)
    # Member values are recreated by _make_skeleton_enum from "members".
    for member in members:
        clsdict.pop(member)
    # Special handling of Enum subclasses
    return clsdict, slotstate
799
+
800
+
801
+ # COLLECTIONS OF OBJECTS REDUCERS
802
+ # -------------------------------
803
+ # A reducer is a function taking a single argument (obj), and that returns a
804
+ # tuple with all the necessary data to re-construct obj. Apart from a few
805
+ # exceptions (list, dict, bytes, int, etc.), a reducer is necessary to
806
+ # correctly pickle an object.
807
+ # While many built-in objects (Exceptions objects, instances of the "object"
808
+ # class, etc), are shipped with their own built-in reducer (invoked using
809
+ # obj.__reduce__), some do not. The following methods were created to "fill
810
+ # these holes".
811
+
812
+
813
def _code_reduce(obj):
    """code object reducer."""
    # If you are not sure about the order of arguments, take a look at help
    # of the specific type from types, for example:
    # >>> from types import CodeType
    # >>> help(CodeType)
    #
    # The branches below are feature-detected (hasattr) rather than
    # version-detected, so the same code supports several interpreters.
    if hasattr(obj, "co_exceptiontable"):
        # Python 3.11 and later: there are some new attributes
        # related to the enhanced exceptions.
        args = (
            obj.co_argcount,
            obj.co_posonlyargcount,
            obj.co_kwonlyargcount,
            obj.co_nlocals,
            obj.co_stacksize,
            obj.co_flags,
            obj.co_code,
            obj.co_consts,
            obj.co_names,
            obj.co_varnames,
            obj.co_filename,
            obj.co_name,
            obj.co_qualname,
            obj.co_firstlineno,
            obj.co_linetable,
            obj.co_exceptiontable,
            obj.co_freevars,
            obj.co_cellvars,
        )
    elif hasattr(obj, "co_linetable"):
        # Python 3.10 and later: obj.co_lnotab is deprecated and constructor
        # expects obj.co_linetable instead.
        args = (
            obj.co_argcount,
            obj.co_posonlyargcount,
            obj.co_kwonlyargcount,
            obj.co_nlocals,
            obj.co_stacksize,
            obj.co_flags,
            obj.co_code,
            obj.co_consts,
            obj.co_names,
            obj.co_varnames,
            obj.co_filename,
            obj.co_name,
            obj.co_firstlineno,
            obj.co_linetable,
            obj.co_freevars,
            obj.co_cellvars,
        )
    elif hasattr(obj, "co_nmeta"):  # pragma: no cover
        # "nogil" Python: modified attributes from 3.9
        args = (
            obj.co_argcount,
            obj.co_posonlyargcount,
            obj.co_kwonlyargcount,
            obj.co_nlocals,
            obj.co_framesize,
            obj.co_ndefaultargs,
            obj.co_nmeta,
            obj.co_flags,
            obj.co_code,
            obj.co_consts,
            obj.co_varnames,
            obj.co_filename,
            obj.co_name,
            obj.co_firstlineno,
            obj.co_lnotab,
            obj.co_exc_handlers,
            obj.co_jump_table,
            obj.co_freevars,
            obj.co_cellvars,
            obj.co_free2reg,
            obj.co_cell2reg,
        )
    else:
        # Backward compat for 3.8 and 3.9
        args = (
            obj.co_argcount,
            obj.co_posonlyargcount,
            obj.co_kwonlyargcount,
            obj.co_nlocals,
            obj.co_stacksize,
            obj.co_flags,
            obj.co_code,
            obj.co_consts,
            obj.co_names,
            obj.co_varnames,
            obj.co_filename,
            obj.co_name,
            obj.co_firstlineno,
            obj.co_lnotab,
            obj.co_freevars,
            obj.co_cellvars,
        )
    return types.CodeType, args
909
+
910
+
911
def _cell_reduce(obj):
    """Cell (containing values of a function's free variables) reducer."""
    try:
        contents = obj.cell_contents
    except ValueError:
        # Reading cell_contents on an empty cell raises ValueError.
        return _make_empty_cell, ()
    return _make_cell, (contents,)
919
+
920
+
921
+ def _classmethod_reduce(obj):
922
+ orig_func = obj.__func__
923
+ return type(obj), (orig_func,)
924
+
925
+
926
def _file_reduce(obj):
    """Save a file."""
    import io

    # Only real, named, readable, non-tty files can be captured: the file
    # content is snapshot into an in-memory StringIO at pickling time.
    if not hasattr(obj, "name") or not hasattr(obj, "mode"):
        raise pickle.PicklingError(
            "Cannot pickle files that do not map to an actual file"
        )
    if obj is sys.stdout:
        return getattr, (sys, "stdout")
    if obj is sys.stderr:
        return getattr, (sys, "stderr")
    if obj is sys.stdin:
        raise pickle.PicklingError("Cannot pickle standard input")
    if obj.closed:
        raise pickle.PicklingError("Cannot pickle closed files")
    if hasattr(obj, "isatty") and obj.isatty():
        raise pickle.PicklingError("Cannot pickle files that map to tty objects")
    if "r" not in obj.mode and "+" not in obj.mode:
        raise pickle.PicklingError(
            "Cannot pickle files that are not opened for reading: %s" % obj.mode
        )

    name = obj.name

    retval = io.StringIO()

    try:
        # Read the whole file, then restore the caller's original position.
        curloc = obj.tell()
        obj.seek(0)
        contents = obj.read()
        obj.seek(curloc)
    except OSError as e:
        raise pickle.PicklingError(
            "Cannot pickle file %s as it cannot be read" % name
        ) from e
    retval.write(contents)
    # The snapshot is positioned where the original file was.
    retval.seek(curloc)

    retval.name = name
    return _file_reconstructor, (retval,)
968
+
969
+
970
+ def _getset_descriptor_reduce(obj):
971
+ return getattr, (obj.__objclass__, obj.__name__)
972
+
973
+
974
+ def _mappingproxy_reduce(obj):
975
+ return types.MappingProxyType, (dict(obj),)
976
+
977
+
978
+ def _memoryview_reduce(obj):
979
+ return bytes, (obj.tobytes(),)
980
+
981
+
982
def _module_reduce(obj):
    """Reduce a module either by re-import or by shipping its namespace."""
    if _should_pickle_by_reference(obj):
        return subimport, (obj.__name__,)
    else:
        # Some external libraries can populate the "__builtins__" entry of a
        # module's `__dict__` with unpicklable objects (see #316). For that
        # reason, we do not attempt to pickle the "__builtins__" entry, and
        # restore a default value for it at unpickling time.
        state = obj.__dict__.copy()
        state.pop("__builtins__", None)
        return dynamic_subimport, (obj.__name__, state)
993
+
994
+
995
+ def _method_reduce(obj):
996
+ return (types.MethodType, (obj.__func__, obj.__self__))
997
+
998
+
999
+ def _logger_reduce(obj):
1000
+ return logging.getLogger, (obj.name,)
1001
+
1002
+
1003
+ def _root_logger_reduce(obj):
1004
+ return logging.getLogger, ()
1005
+
1006
+
1007
+ def _property_reduce(obj):
1008
+ return property, (obj.fget, obj.fset, obj.fdel, obj.__doc__)
1009
+
1010
+
1011
+ def _weakset_reduce(obj):
1012
+ return weakref.WeakSet, (list(obj),)
1013
+
1014
+
1015
def _dynamic_class_reduce(obj):
    """Save a class that can't be referenced as a module attribute.

    This method is used to serialize classes that are defined inside
    functions, or that otherwise can't be serialized as attribute lookups
    from importable modules.

    Returns a 6-tuple in the extended __reduce__ format:
    (reconstructor, newargs, state, listitems, dictitems, state_setter).
    """
    if Enum is not None and issubclass(obj, Enum):
        return (
            _make_skeleton_enum,
            _enum_getnewargs(obj),
            _enum_getstate(obj),
            None,
            None,
            _class_setstate,
        )
    else:
        return (
            _make_skeleton_class,
            _class_getnewargs(obj),
            _class_getstate(obj),
            None,
            None,
            _class_setstate,
        )
1040
+
1041
+
1042
def _class_reduce(obj):
    """Select the reducer depending on the dynamic nature of the class obj."""
    # The NoneType/ellipsis/NotImplementedType classes have no importable
    # name; rebuild them as type(<singleton>).
    if obj is type(None):  # noqa
        return type, (None,)
    elif obj is type(Ellipsis):
        return type, (Ellipsis,)
    elif obj is type(NotImplemented):
        return type, (NotImplemented,)
    elif obj in _BUILTIN_TYPE_NAMES:
        return _builtin_type, (_BUILTIN_TYPE_NAMES[obj],)
    elif not _should_pickle_by_reference(obj):
        return _dynamic_class_reduce(obj)
    # NotImplemented tells the caller to fall back to the default
    # pickle-by-reference machinery.
    return NotImplemented
1055
+
1056
+
1057
def _dict_keys_reduce(obj):
    """Reduce a dict_keys view from its key list alone.

    Shipping only the keys (not the backing dict) avoids unintentionally
    leaking the rest of the mapping, which could hold sensitive data.
    """
    return _make_dict_keys, (list(obj),)
1062
+
1063
+
1064
def _dict_values_reduce(obj):
    """Reduce a dict_values view from its value list alone.

    Shipping only the values (not the backing dict) avoids unintentionally
    leaking the rest of the mapping, which could hold sensitive data.
    """
    return _make_dict_values, (list(obj),)
1069
+
1070
+
1071
def _dict_items_reduce(obj):
    """Reduce a dict_items view by materializing it back into a dict."""
    return _make_dict_items, (dict(obj),)
1073
+
1074
+
1075
def _odict_keys_reduce(obj):
    """Reduce an odict_keys view from its key list alone.

    Only the keys are shipped (not the backing OrderedDict) so no unrelated,
    potentially sensitive entries leak into the pickle stream.
    """
    return _make_dict_keys, (list(obj), True)
1080
+
1081
+
1082
def _odict_values_reduce(obj):
    """Reduce an odict_values view from its value list alone.

    Only the values are shipped (not the backing OrderedDict) so no
    unrelated, potentially sensitive entries leak into the pickle stream.
    """
    return _make_dict_values, (list(obj), True)
1087
+
1088
+
1089
def _odict_items_reduce(obj):
    """Reduce an odict_items view by materializing it back into a mapping."""
    return _make_dict_items, (dict(obj), True)
1091
+
1092
+
1093
def _dataclass_field_base_reduce(obj):
    # obj is one of the dataclasses._FIELD* sentinels; rebuild it by name so
    # sentinel identity is preserved across the pickle round trip.
    return _get_dataclass_field_type_sentinel, (obj.name,)
1095
+
1096
+
1097
+ # COLLECTIONS OF OBJECTS STATE SETTERS
1098
+ # ------------------------------------
1099
+ # state setters are called at unpickling time, once the object is created and
1100
+ # it has to be updated to how it was at unpickling time.
1101
+
1102
+
1103
def _function_setstate(obj, state):
    """Update the state of a dynamic function.

    As __closure__ and __globals__ are readonly attributes of a function, we
    cannot rely on the native setstate routine of pickle.load_build, that calls
    setattr on items of the slotstate. Instead, we have to modify them inplace.
    """
    state, slotstate = state
    obj.__dict__.update(state)

    obj_globals = slotstate.pop("__globals__")
    obj_closure = slotstate.pop("__closure__")
    # _cloudpickle_subimports is a set of submodules that must be loaded for
    # the pickled function to work correctly at unpickling time. Now that these
    # submodules are depickled (hence imported), they can be removed from the
    # object's state (the object state only served as a reference holder to
    # these submodules)
    slotstate.pop("_cloudpickle_submodules")

    obj.__globals__.update(obj_globals)
    obj.__globals__["__builtins__"] = __builtins__

    if obj_closure is not None:
        # Fill the pre-created empty cells in place; empty source cells are
        # left empty.
        for i, cell in enumerate(obj_closure):
            try:
                value = cell.cell_contents
            except ValueError:  # cell is empty
                continue
            obj.__closure__[i].cell_contents = value

    for k, v in slotstate.items():
        setattr(obj, k, v)
1135
+
1136
+
1137
def _class_setstate(obj, state):
    """Restore the attributes of a reconstructed dynamic class."""
    # Check if class is being reused and needs bypass setstate logic.
    if obj in _DYNAMIC_CLASS_TRACKER_REUSING:
        return obj
    state, slotstate = state
    registry = None
    for attrname, attr in state.items():
        if attrname == "_abc_impl":
            # ABC registrations are applied through obj.register below, not
            # via plain attribute assignment.
            registry = attr
        else:
            setattr(obj, attrname, attr)
    if registry is not None:
        for subclass in registry:
            obj.register(subclass)

    return obj
1153
+
1154
+
1155
+ # COLLECTION OF DATACLASS UTILITIES
1156
+ # ---------------------------------
1157
+ # There are some internal sentinel values whose identity must be preserved when
1158
+ # unpickling dataclass fields. Each sentinel value has a unique name that we can
1159
+ # use to retrieve its identity at unpickling time.
1160
+
1161
+
1162
# Maps each dataclasses field-kind sentinel's name to the sentinel object
# itself, so _get_dataclass_field_type_sentinel can restore identity at
# unpickling time. (The misspelled "DATACLASSE" name is kept as-is: it is
# looked up by other cloudpickle code and by existing pickles.)
_DATACLASSE_FIELD_TYPE_SENTINELS = {
    dataclasses._FIELD.name: dataclasses._FIELD,
    dataclasses._FIELD_CLASSVAR.name: dataclasses._FIELD_CLASSVAR,
    dataclasses._FIELD_INITVAR.name: dataclasses._FIELD_INITVAR,
}
1167
+
1168
+
1169
def _get_dataclass_field_type_sentinel(name):
    # Restore a dataclasses._FIELD* sentinel by name, preserving identity.
    return _DATACLASSE_FIELD_TYPE_SENTINELS[name]
1171
+
1172
+
1173
class Pickler(pickle.Pickler):
    """cloudpickle's Pickler.

    Extends pickle.Pickler with reducers for dynamic functions and classes
    plus the various otherwise-unpicklable builtins registered in
    ``_dispatch_table`` below.
    """

    # set of reducers defined and used by cloudpickle (private)
    _dispatch_table = {}
    _dispatch_table[classmethod] = _classmethod_reduce
    _dispatch_table[io.TextIOWrapper] = _file_reduce
    _dispatch_table[logging.Logger] = _logger_reduce
    _dispatch_table[logging.RootLogger] = _root_logger_reduce
    _dispatch_table[memoryview] = _memoryview_reduce
    _dispatch_table[property] = _property_reduce
    _dispatch_table[staticmethod] = _classmethod_reduce
    _dispatch_table[CellType] = _cell_reduce
    _dispatch_table[types.CodeType] = _code_reduce
    _dispatch_table[types.GetSetDescriptorType] = _getset_descriptor_reduce
    _dispatch_table[types.ModuleType] = _module_reduce
    _dispatch_table[types.MethodType] = _method_reduce
    _dispatch_table[types.MappingProxyType] = _mappingproxy_reduce
    _dispatch_table[weakref.WeakSet] = _weakset_reduce
    _dispatch_table[typing.TypeVar] = _typevar_reduce
    _dispatch_table[_collections_abc.dict_keys] = _dict_keys_reduce
    _dispatch_table[_collections_abc.dict_values] = _dict_values_reduce
    _dispatch_table[_collections_abc.dict_items] = _dict_items_reduce
    _dispatch_table[type(OrderedDict().keys())] = _odict_keys_reduce
    _dispatch_table[type(OrderedDict().values())] = _odict_values_reduce
    _dispatch_table[type(OrderedDict().items())] = _odict_items_reduce
    _dispatch_table[abc.abstractmethod] = _classmethod_reduce
    _dispatch_table[abc.abstractclassmethod] = _classmethod_reduce
    _dispatch_table[abc.abstractstaticmethod] = _classmethod_reduce
    _dispatch_table[abc.abstractproperty] = _property_reduce
    _dispatch_table[dataclasses._FIELD_BASE] = _dataclass_field_base_reduce

    # Custom reducers take precedence over copyreg's global registrations.
    dispatch_table = ChainMap(_dispatch_table, copyreg.dispatch_table)

    # function reducers are defined as instance methods of cloudpickle.Pickler
    # objects, as they rely on a cloudpickle.Pickler attribute (globals_ref)
    def _dynamic_function_reduce(self, func):
        """Reduce a function that is not pickleable via attribute lookup."""
        newargs = self._function_getnewargs(func)
        state = _function_getstate(func)
        return (_make_function, newargs, state, None, None, _function_setstate)

    def _function_reduce(self, obj):
        """Reducer for function objects.

        If obj is a top-level attribute of a file-backed module, this reducer
        returns NotImplemented, making the cloudpickle.Pickler fall back to
        traditional pickle.Pickler routines to save obj. Otherwise, it reduces
        obj using a custom cloudpickle reducer designed specifically to handle
        dynamic functions.
        """
        if _should_pickle_by_reference(obj):
            return NotImplemented
        else:
            return self._dynamic_function_reduce(obj)

    def _function_getnewargs(self, func):
        """Return the _make_function arguments for a dynamic function."""
        code = func.__code__

        # base_globals represents the future global namespace of func at
        # unpickling time. Looking it up and storing it in
        # cloudpickle.Pickler.globals_ref allow functions sharing the same
        # globals at pickling time to also share them once unpickled, at one
        # condition: since globals_ref is an attribute of a cloudpickle.Pickler
        # instance, and that a new cloudpickle.Pickler is created each time
        # cloudpickle.dump or cloudpickle.dumps is called, functions also need
        # to be saved within the same invocation of
        # cloudpickle.dump/cloudpickle.dumps (for example:
        # cloudpickle.dumps([f1, f2])). There is no such limitation when using
        # cloudpickle.Pickler.dump, as long as the multiple invocations are
        # bound to the same cloudpickle.Pickler instance.
        base_globals = self.globals_ref.setdefault(id(func.__globals__), {})

        if base_globals == {}:
            # Add module attributes used to resolve relative imports
            # instructions inside func.
            for k in ["__package__", "__name__", "__path__", "__file__"]:
                if k in func.__globals__:
                    base_globals[k] = func.__globals__[k]

        # Do not bind the free variables before the function is created to
        # avoid infinite recursion.
        if func.__closure__ is None:
            closure = None
        else:
            closure = tuple(_make_empty_cell() for _ in range(len(code.co_freevars)))

        return code, base_globals, None, None, closure

    def dump(self, obj):
        """Pickle obj, converting deep-recursion RuntimeErrors into
        PicklingError."""
        try:
            return super().dump(obj)
        except RuntimeError as e:
            if len(e.args) > 0 and "recursion" in e.args[0]:
                msg = "Could not pickle object as excessively deep recursion required."
                raise pickle.PicklingError(msg) from e
            else:
                raise

    def __init__(self, file, protocol=None, buffer_callback=None):
        if protocol is None:
            protocol = DEFAULT_PROTOCOL
        super().__init__(file, protocol=protocol, buffer_callback=buffer_callback)
        # map functions __globals__ attribute ids, to ensure that functions
        # sharing the same global namespace at pickling time also share
        # their global namespace at unpickling time.
        self.globals_ref = {}
        self.proto = int(protocol)

    if not PYPY:
        # pickle.Pickler is the C implementation of the CPython pickler and
        # therefore we rely on reduce_override method to customize the pickler
        # behavior.

        # `cloudpickle.Pickler.dispatch` is only left for backward
        # compatibility - note that when using protocol 5,
        # `cloudpickle.Pickler.dispatch` is not an extension of
        # `pickle._Pickler.dispatch` dictionary, because `cloudpickle.Pickler`
        # subclasses the C-implemented `pickle.Pickler`, which does not expose
        # a `dispatch` attribute. Earlier versions of `cloudpickle.Pickler`
        # used `cloudpickle.Pickler.dispatch` as a class-level attribute
        # storing all reducers implemented by cloudpickle, but the attribute
        # name was not a great choice given because it would collide with a
        # similarly named attribute in the pure-Python `pickle._Pickler`
        # implementation in the standard library.
        dispatch = dispatch_table

        # Implementation of the reducer_override callback, in order to
        # efficiently serialize dynamic functions and classes by subclassing
        # the C-implemented `pickle.Pickler`.
        # TODO: decorrelate reducer_override (which is tied to CPython's
        # implementation - would it make sense to backport it to pypy? - and
        # pickle's protocol 5 which is implementation agnostic. Currently, the
        # availability of both notions coincide on CPython's pickle, but it may
        # not be the case anymore when pypy implements protocol 5.

        def reducer_override(self, obj):
            """Type-agnostic reducing callback for function and classes.

            For performance reasons, subclasses of the C `pickle.Pickler` class
            cannot register custom reducers for functions and classes in the
            dispatch_table attribute. Reducers for such types must instead
            implemented via the special `reducer_override` method.

            Note that this method will be called for any object except a few
            builtin-types (int, lists, dicts etc.), which differs from reducers
            in the Pickler's dispatch_table, each of them being invoked for
            objects of a specific type only.

            This property comes in handy for classes: although most classes are
            instances of the ``type`` metaclass, some of them can be instances
            of other custom metaclasses (such as enum.EnumMeta for example). In
            particular, the metaclass will likely not be known in advance, and
            thus cannot be special-cased using an entry in the dispatch_table.
            reducer_override, among other things, allows us to register a
            reducer that will be called for any class, independently of its
            type.

            Notes:

            * reducer_override has the priority over dispatch_table-registered
            reducers.
            * reducer_override can be used to fix other limitations of
            cloudpickle for other types that suffered from type-specific
            reducers, such as Exceptions. See
            https://github.com/cloudpipe/cloudpickle/issues/248
            """
            t = type(obj)
            try:
                is_anyclass = issubclass(t, type)
            except TypeError:  # t is not a class (old Boost; see SF #502085)
                is_anyclass = False

            if is_anyclass:
                return _class_reduce(obj)
            elif isinstance(obj, types.FunctionType):
                return self._function_reduce(obj)
            else:
                # fallback to save_global, including the Pickler's
                # dispatch_table
                return NotImplemented

    else:
        # When reducer_override is not available, hack the pure-Python
        # Pickler's types.FunctionType and type savers. Note: the type saver
        # must override Pickler.save_global, because pickle.py contains a
        # hard-coded call to save_global when pickling meta-classes.
        dispatch = pickle.Pickler.dispatch.copy()

        def _save_reduce_pickle5(
            self,
            func,
            args,
            state=None,
            listitems=None,
            dictitems=None,
            state_setter=None,
            obj=None,
        ):
            save = self.save
            write = self.write
            self.save_reduce(
                func,
                args,
                state=None,
                listitems=listitems,
                dictitems=dictitems,
                obj=obj,
            )
            # backport of the Python 3.8 state_setter pickle operations
            save(state_setter)
            save(obj)  # simple BINGET opcode as obj is already memoized.
            save(state)
            write(pickle.TUPLE2)
            # Trigger a state_setter(obj, state) function call.
            write(pickle.REDUCE)
            # The purpose of state_setter is to carry-out an
            # inplace modification of obj. We do not care about what the
            # method might return, so its output is eventually removed from
            # the stack.
            write(pickle.POP)

        def save_global(self, obj, name=None, pack=struct.pack):
            """Main dispatch method.

            The name of this method is somewhat misleading: all types get
            dispatched here.
            """
            if obj is type(None):  # noqa
                return self.save_reduce(type, (None,), obj=obj)
            elif obj is type(Ellipsis):
                return self.save_reduce(type, (Ellipsis,), obj=obj)
            elif obj is type(NotImplemented):
                return self.save_reduce(type, (NotImplemented,), obj=obj)
            elif obj in _BUILTIN_TYPE_NAMES:
                return self.save_reduce(
                    _builtin_type, (_BUILTIN_TYPE_NAMES[obj],), obj=obj
                )

            if name is not None:
                super().save_global(obj, name=name)
            elif not _should_pickle_by_reference(obj, name=name):
                self._save_reduce_pickle5(*_dynamic_class_reduce(obj), obj=obj)
            else:
                super().save_global(obj, name=name)

        dispatch[type] = save_global

        def save_function(self, obj, name=None):
            """Registered with the dispatch to handle all function types.

            Determines what kind of function obj is (e.g. lambda, defined at
            interactive prompt, etc) and handles the pickling appropriately.
            """
            if _should_pickle_by_reference(obj, name=name):
                return super().save_global(obj, name=name)
            elif PYPY and isinstance(obj.__code__, builtin_code_type):
                return self.save_pypy_builtin_func(obj)
            else:
                return self._save_reduce_pickle5(
                    *self._dynamic_function_reduce(obj), obj=obj
                )

        def save_pypy_builtin_func(self, obj):
            """Save pypy equivalent of builtin functions.

            PyPy does not have the concept of builtin-functions. Instead,
            builtin-functions are simple function instances, but with a
            builtin-code attribute.
            Most of the time, builtin functions should be pickled by attribute.
            But PyPy has flaky support for __qualname__, so some builtin
            functions such as float.__new__ will be classified as dynamic. For
            this reason only, we created this special routine. Because
            builtin-functions are not expected to have closure or globals,
            there is no additional hack (compared the one already implemented
            in pickle) to protect ourselves from reference cycles. A simple
            (reconstructor, newargs, obj.__dict__) tuple is save_reduced. Note
            also that PyPy improved their support for __qualname__ in v3.6, so
            this routing should be removed when cloudpickle supports only PyPy
            3.6 and later.
            """
            rv = (
                types.FunctionType,
                (obj.__code__, {}, obj.__name__, obj.__defaults__, obj.__closure__),
                obj.__dict__,
            )
            self.save_reduce(*rv, obj=obj)

        dispatch[types.FunctionType] = save_function
1460
+
1461
+
1462
+ # Shorthands similar to pickle.dump/pickle.dumps
1463
+
1464
+
1465
def dump(obj, file, protocol=None, buffer_callback=None):
    """Serialize obj as bytes streamed into file

    protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
    pickle.HIGHEST_PROTOCOL. This setting favors maximum communication
    speed between processes running the same Python version.

    Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
    compatibility with older versions of Python (although this is not always
    guaranteed to work because cloudpickle relies on some internal
    implementation details that can change from one Python version to the
    next).
    """
    # A fresh Pickler per call: functions sharing globals are only
    # deduplicated within a single dump() invocation.
    Pickler(file, protocol=protocol, buffer_callback=buffer_callback).dump(obj)
1479
+
1480
+
1481
def dumps(obj, protocol=None, buffer_callback=None):
    """Serialize obj as a string of bytes allocated in memory

    protocol defaults to cloudpickle.DEFAULT_PROTOCOL which is an alias to
    pickle.HIGHEST_PROTOCOL. This setting favors maximum communication
    speed between processes running the same Python version.

    Set protocol=pickle.DEFAULT_PROTOCOL instead if you need to ensure
    compatibility with older versions of Python (although this is not always
    guaranteed to work because cloudpickle relies on some internal
    implementation details that can change from one Python version to the
    next).
    """
    with io.BytesIO() as file:
        cp = Pickler(file, protocol=protocol, buffer_callback=buffer_callback)
        cp.dump(obj)
        return file.getvalue()
1498
+
1499
+
1500
# Re-export the stdlib unpickling functions in this namespace for
# convenience: cloudpickle-produced streams are plain pickle streams.
load, loads = pickle.load, pickle.loads

# Backward compat alias.
CloudPickler = Pickler
lib/python3.10/site-packages/numba/cloudpickle/cloudpickle_fast.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Compatibility module.
2
+
3
+ It can be necessary to load files generated by previous versions of cloudpickle
4
+ that rely on symbols being defined under the `cloudpickle.cloudpickle_fast`
5
+ namespace.
6
+
7
+ See: tests/test_backward_compat.py
8
+ """
9
+ from . import cloudpickle
10
+
11
+
12
+ def __getattr__(name):
13
+ return getattr(cloudpickle, name)
lib/python3.10/site-packages/numba/core/annotations/__init__.py ADDED
File without changes
lib/python3.10/site-packages/numba/core/annotations/pretty_annotate.py ADDED
@@ -0,0 +1,283 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This module implements code highlighting of numba function annotations.
3
+ """
4
+
5
+ from warnings import warn
6
+
7
+ warn("The pretty_annotate functionality is experimental and might change API",
8
+ FutureWarning)
9
+
10
def hllines(code, style):
    """Given a code string, return a list of html-highlighted lines.

    Parameters
    ----------
    code : str
        Python source code to highlight.
    style : str
        Name of the pygments style to use.

    Raises
    ------
    ImportError
        If the optional 'pygments' dependency is not installed.
    """
    # NOTE: the description above used to sit as a bare (no-op) string
    # expression in the middle of the function body; it is now the docstring.
    try:
        from pygments import highlight
        from pygments.lexers import PythonLexer
        from pygments.formatters import HtmlFormatter
    except ImportError:
        raise ImportError("please install the 'pygments' package")
    pylex = PythonLexer()
    # noclasses=True inlines all styling so output is self-contained;
    # nowrap=True omits the wrapping <div>/<pre> so lines can be split.
    hf = HtmlFormatter(noclasses=True, style=style, nowrap=True)
    res = highlight(code, pylex, hf)
    return res.splitlines()
22
+
23
+
24
def htlines(code, style):
    """Given a code string, return a list of ANSI-highlighted lines.

    Parameters
    ----------
    code : str
        Python source code to highlight.
    style : str
        Name of the pygments style to use (see formatter note below).

    Raises
    ------
    ImportError
        If the optional 'pygments' dependency is not installed.
    """
    # NOTE: the description above used to sit as a bare (no-op) string
    # expression in the middle of the function body; it is now the docstring.
    try:
        from pygments import highlight
        from pygments.lexers import PythonLexer
        # TerminalFormatter does not support themes, Terminal256 should,
        # but seem to not work.
        from pygments.formatters import TerminalFormatter
    except ImportError:
        raise ImportError("please install the 'pygments' package")
    pylex = PythonLexer()
    hf = TerminalFormatter(style=style)
    res = highlight(code, pylex, hf)
    return res.splitlines()
38
+
39
def get_ansi_template():
    """Build and return the jinja2 ``Template`` used for ANSI terminal output.

    Raises
    ------
    ImportError
        If the optional 'jinja2' dependency is not installed.
    """
    # FIX: an unreachable ``return ansi_template`` (referencing an undefined
    # name) used to follow the ``return Template(...)`` below; removed.
    try:
        from jinja2 import Template
    except ImportError:
        raise ImportError("please install the 'jinja2' package")
    return Template("""
    {%- for func_key in func_data.keys() -%}
    Function name: \x1b[34m{{func_data[func_key]['funcname']}}\x1b[39;49;00m
    {%- if func_data[func_key]['filename'] -%}
    {{'\n'}}In file: \x1b[34m{{func_data[func_key]['filename'] -}}\x1b[39;49;00m
    {%- endif -%}
    {{'\n'}}With signature: \x1b[34m{{func_key[1]}}\x1b[39;49;00m
    {{- "\n" -}}
    {%- for num, line, hl, hc in func_data[func_key]['pygments_lines'] -%}
    {{-'\n'}}{{ num}}: {{hc-}}
    {%- if func_data[func_key]['ir_lines'][num] -%}
    {%- for ir_line, ir_line_type in func_data[func_key]['ir_lines'][num] %}
    {{-'\n'}}--{{- ' '*func_data[func_key]['python_indent'][num]}}
    {{- ' '*(func_data[func_key]['ir_indent'][num][loop.index0]+4)
    }}{{ir_line }}\x1b[41m{{ir_line_type-}}\x1b[39;49;00m
    {%- endfor -%}
    {%- endif -%}
    {%- endfor -%}
    {%- endfor -%}
    """)
65
+
66
def get_html_template():
    """Build and return the jinja2 ``Template`` used for HTML output.

    Raises ImportError if the optional 'jinja2' dependency is missing.
    """
    try:
        from jinja2 import Template
    except ImportError:
        raise ImportError("please install the 'jinja2' package")
    return Template("""
    <html>
    <head>
    <style>

    .annotation_table {
        color: #000000;
        font-family: monospace;
        margin: 5px;
        width: 100%;
    }

    /* override JupyterLab style */
    .annotation_table td {
        text-align: left;
        background-color: transparent;
        padding: 1px;
    }

    .annotation_table tbody tr:nth-child(even) {
        background: white;
    }

    .annotation_table code
    {
        background-color: transparent;
        white-space: normal;
    }

    /* End override JupyterLab style */

    tr:hover {
        background-color: rgba(92, 200, 249, 0.25);
    }

    td.object_tag summary ,
    td.lifted_tag summary{
        font-weight: bold;
        display: list-item;
    }

    span.lifted_tag {
        color: #00cc33;
    }

    span.object_tag {
        color: #cc3300;
    }


    td.lifted_tag {
        background-color: #cdf7d8;
    }

    td.object_tag {
        background-color: #fef5c8;
    }

    code.ir_code {
        color: grey;
        font-style: italic;
    }

    .metadata {
        border-bottom: medium solid black;
        display: inline-block;
        padding: 5px;
        width: 100%;
    }

    .annotations {
        padding: 5px;
    }

    .hidden {
        display: none;
    }

    .buttons {
        padding: 10px;
        cursor: pointer;
    }
    </style>
    </head>

    <body>
    {% for func_key in func_data.keys() %}
    <div class="metadata">
    Function name: {{func_data[func_key]['funcname']}}<br />
    {% if func_data[func_key]['filename'] %}
    in file: {{func_data[func_key]['filename']|escape}}<br />
    {% endif %}
    with signature: {{func_key[1]|e}}
    </div>
    <div class="annotations">
    <table class="annotation_table tex2jax_ignore">
    {%- for num, line, hl, hc in func_data[func_key]['pygments_lines'] -%}
    {%- if func_data[func_key]['ir_lines'][num] %}
    <tr><td style="text-align:left;" class="{{func_data[func_key]['python_tags'][num]}}">
    <details>
    <summary>
    <code>
    {{num}}:
    {{'&nbsp;'*func_data[func_key]['python_indent'][num]}}{{hl}}
    </code>
    </summary>
    <table class="annotation_table">
    <tbody>
    {%- for ir_line, ir_line_type in func_data[func_key]['ir_lines'][num] %}
    <tr class="ir_code">
    <td style="text-align: left;"><code>
    &nbsp;
    {{- '&nbsp;'*func_data[func_key]['python_indent'][num]}}
    {{ '&nbsp;'*func_data[func_key]['ir_indent'][num][loop.index0]}}{{ir_line|e -}}
    <span class="object_tag">{{ir_line_type}}</span>
    </code>
    </td>
    </tr>
    {%- endfor -%}
    </tbody>
    </table>
    </details>
    </td></tr>
    {% else -%}
    <tr><td style="text-align:left; padding-left: 22px;" class="{{func_data[func_key]['python_tags'][num]}}">
    <code>
    {{num}}:
    {{'&nbsp;'*func_data[func_key]['python_indent'][num]}}{{hl}}
    </code>
    </td></tr>
    {%- endif -%}
    {%- endfor -%}
    </table>
    </div>
    {% endfor %}
    </body>
    </html>
    """)
209
+
210
+
211
def reform_code(annotation):
    """
    Extract the code from the Numba annotation datastructure.

    Pygments can only highlight full multi-line strings, the Numba
    annotation is list of single lines, with indentation removed.
    """
    indents = annotation['python_indent']
    return ''.join(
        '%s%s\n' % (' ' * indents[lineno], text)
        for lineno, text in annotation['python_lines']
    )
223
+
224
+
225
class Annotate:
    """
    Construct syntax highlighted annotation for a given jitted function:

    Example:

    >>> import numba
    >>> from numba.pretty_annotate import Annotate
    >>> @numba.jit
    ... def test(q):
    ...     res = 0
    ...     for i in range(q):
    ...         res += i
    ...     return res
    ...
    >>> test(10)
    45
    >>> Annotate(test)

    The last line will return an HTML and/or ANSI representation that will be
    displayed accordingly in Jupyter/IPython.

    Function annotations persist across compilation for newly encountered
    type signatures and as a result annotations are shown for all signatures
    by default.

    Annotations for a specific signature can be shown by using the
    ``signature`` parameter.

    >>> @numba.jit
    ... def add(x, y):
    ...     return x + y
    ...
    >>> add(1, 2)
    3
    >>> add(1.3, 5.7)
    7.0
    >>> add.signatures
    [(int64, int64), (float64, float64)]
    >>> Annotate(add, signature=add.signatures[1])  # annotation for (float64, float64)
    """

    def __init__(self, function, signature=None, **kwargs):
        style = kwargs.get('style', 'default')
        if not function.signatures:
            raise ValueError('function need to be jitted for at least one signature')
        self.ann = function.get_annotation_info(signature=signature)

        # Attach pygments renderings (HTML and ANSI) of the reconstructed
        # source to each function's annotation record.
        for info in self.ann.values():
            source = reform_code(info)
            html_lines = hllines(source, style)
            ansi_lines = htlines(source, style)
            info['pygments_lines'] = [
                (lineno, text, html, ansi)
                for (lineno, text), html, ansi
                in zip(info['python_lines'], html_lines, ansi_lines)
            ]

    def _repr_html_(self):
        return get_html_template().render(func_data=self.ann)

    def __repr__(self):
        return get_ansi_template().render(func_data=self.ann)
lib/python3.10/site-packages/numba/core/annotations/template.html ADDED
@@ -0,0 +1,144 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <html>
2
+
3
+ <head>
4
+
5
+ <style>
6
+
7
+ .annotation_table {
8
+ color: #000000;
9
+ font-family: monospace;
10
+ margin: 5px;
11
+ width: 100%;
12
+ }
13
+
14
+ /* override JupyterLab style */
15
+ .annotation_table td {
16
+ text-align: left;
17
+ background-color: transparent;
18
+ padding: 1px;
19
+ }
20
+
21
+ .annotation_table code
22
+ {
23
+ background-color: transparent;
24
+ white-space: normal;
25
+ }
26
+
27
+ /* End override JupyterLab style */
28
+
29
+ tr:hover {
30
+ background-color: rgba(92, 200, 249, 0.25);
31
+ }
32
+
33
+ td.object_tag summary ,
34
+ td.lifted_tag summary{
35
+ font-weight: bold;
36
+ display: list-item;
37
+ }
38
+
39
+ span.lifted_tag {
40
+ color: #00cc33;
41
+ }
42
+
43
+ span.object_tag {
44
+ color: #cc3300;
45
+ }
46
+
47
+
48
+ td.lifted_tag {
49
+ background-color: #cdf7d8;
50
+ }
51
+
52
+ td.object_tag {
53
+ background-color: #ffd3d3;
54
+ }
55
+
56
+ code.ir_code {
57
+ color: grey;
58
+ font-style: italic;
59
+ }
60
+
61
+ .metadata {
62
+ border-bottom: medium solid black;
63
+ display: inline-block;
64
+ padding: 5px;
65
+ width: 100%;
66
+ }
67
+
68
+ .annotations {
69
+ padding: 5px;
70
+ }
71
+
72
+ .hidden {
73
+ display: none;
74
+ }
75
+
76
+ .buttons {
77
+ padding: 10px;
78
+ cursor: pointer;
79
+ }
80
+
81
+ </style>
82
+
83
+ </head>
84
+
85
+ <body>
86
+
87
+ {% for func_key in func_data.keys() %}
88
+
89
+ {% set loop1 = loop %}
90
+
91
+ <div class="metadata">
92
+ Function name: {{func_data[func_key]['funcname']}}<br />
93
+ in file: {{func_data[func_key]['filename']}}<br />
94
+ with signature: {{func_key[1]|e}}
95
+ </div>
96
+
97
+ <div class="annotations">
98
+
99
+ <table class="annotation_table tex2jax_ignore">
100
+ {%- for num, line in func_data[func_key]['python_lines'] -%}
101
+ {%- if func_data[func_key]['ir_lines'][num] %}
102
+ <tr><td class="{{func_data[func_key]['python_tags'][num]}}">
103
+ <details>
104
+ <summary>
105
+ <code>
106
+ {{num}}:
107
+ {{func_data[func_key]['python_indent'][num]}}{{line|e}}
108
+ </code>
109
+ </summary>
110
+ <table class="annotation_table">
111
+ <tbody>
112
+ {%- for ir_line, ir_line_type in func_data[func_key]['ir_lines'][num] %}
113
+ <tr class="ir_code func{{loop1.index0}}_ir">
114
+ <td><code>&nbsp;
115
+ {{- func_data[func_key]['python_indent'][num]}}
116
+ {{func_data[func_key]['ir_indent'][num][loop.index0]}}{{ir_line|e -}}
117
+ <span class="object_tag">{{ir_line_type}}</span>
118
+ </code>
119
+ </td>
120
+ </tr>
121
+ {%- endfor -%}
122
+ </tbody>
123
+ </table>
124
+ </details>
125
+ </td></tr>
126
+ {% else -%}
127
+ <tr><td style=" padding-left: 22px;" class="{{func_data[func_key]['python_tags'][num]}}">
128
+ <code>
129
+ {{num}}:
130
+ {{func_data[func_key]['python_indent'][num]}}{{line|e}}
131
+ </code>
132
+ </td></tr>
133
+ {%- endif -%}
134
+ {%- endfor -%}
135
+ </table>
136
+ </div>
137
+
138
+ <br /><br /><br />
139
+
140
+ {% endfor %}
141
+
142
+ </body>
143
+
144
+ </html>
lib/python3.10/site-packages/numba/core/annotations/type_annotations.py ADDED
@@ -0,0 +1,283 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from collections import defaultdict, OrderedDict
2
+ from collections.abc import Mapping
3
+ from contextlib import closing
4
+ import copy
5
+ import inspect
6
+ import os
7
+ import re
8
+ import sys
9
+ import textwrap
10
+ from io import StringIO
11
+
12
+ import numba.core.dispatcher
13
+ from numba.core import ir
14
+
15
+
16
class SourceLines(Mapping):
    """Mapping from absolute line numbers to dedented source lines of *func*."""

    def __init__(self, func):
        try:
            raw_lines, first_lineno = inspect.getsourcelines(func)
        except OSError:
            # Source is not retrievable (e.g. C-implemented or REPL-defined).
            self.lines = ()
            self.startno = 0
        else:
            dedented = textwrap.dedent(''.join(raw_lines))
            self.lines = dedented.splitlines()
            self.startno = first_lineno

    def __getitem__(self, lineno):
        offset = lineno - self.startno
        try:
            return self.lines[offset].rstrip()
        except IndexError:
            # Out-of-range line numbers silently map to an empty string.
            return ''

    def __iter__(self):
        start = self.startno
        return iter(range(start, start + len(self.lines)))

    def __len__(self):
        return len(self.lines)

    @property
    def avail(self):
        # True when source lines were successfully retrieved.
        return bool(self.lines)
43
+
44
+
45
class TypeAnnotation(object):
    """Collects the typed Numba IR of one compiled function and renders it
    as plain text (``annotate``/``__str__``), raw structured data
    (``annotate_raw``) or an HTML page (``html_annotate``).
    """

    # func_data dict stores annotation data for all functions that are
    # compiled. We store the data in the TypeAnnotation class since a new
    # TypeAnnotation instance is created for each function that is compiled.
    # For every function that is compiled, we add the type annotation data to
    # this dict and write the html annotation file to disk (rewrite the html
    # file for every function since we don't know if this is the last function
    # to be compiled).
    func_data = OrderedDict()

    def __init__(self, func_ir, typemap, calltypes, lifted, lifted_from,
                 args, return_type, html_output=None):
        self.func_id = func_ir.func_id
        self.blocks = func_ir.blocks
        self.typemap = typemap
        self.calltypes = calltypes
        self.filename = func_ir.loc.filename
        self.linenum = str(func_ir.loc.line)
        self.signature = str(args) + ' -> ' + str(return_type)

        # lifted loop information
        self.lifted = lifted
        self.num_lifted_loops = len(lifted)

        # If this is a lifted loop function that is being compiled, lifted_from
        # points to annotation data from function that this loop lifted function
        # was lifted from. This is used to stick lifted loop annotations back
        # into original function.
        self.lifted_from = lifted_from

    def prepare_annotations(self):
        """Group the textual form of each IR instruction by source line.

        Returns a ``defaultdict(list)`` mapping source line number to the
        list of annotated IR lines for that line.
        """
        # Prepare annotations
        groupedinst = defaultdict(list)
        found_lifted_loop = False
        #for blkid, blk in self.blocks.items():
        for blkid in sorted(self.blocks.keys()):
            blk = self.blocks[blkid]
            groupedinst[blk.loc.line].append("label %s" % blkid)
            for inst in blk.body:
                lineno = inst.loc.line

                if isinstance(inst, ir.Assign):
                    if found_lifted_loop:
                        atype = 'XXX Lifted Loop XXX'
                        found_lifted_loop = False
                    elif (isinstance(inst.value, ir.Expr) and
                            inst.value.op == 'call'):
                        atype = self.calltypes[inst.value]
                    elif (isinstance(inst.value, ir.Const) and
                            isinstance(inst.value.value,
                                       numba.core.dispatcher.LiftedLoop)):
                        atype = 'XXX Lifted Loop XXX'
                        found_lifted_loop = True
                    else:
                        # TODO: fix parfor lowering so that typemap is valid.
                        atype = self.typemap.get(inst.target.name, "<missing>")

                    aline = "%s = %s :: %s" % (inst.target, inst.value, atype)
                elif isinstance(inst, ir.SetItem):
                    atype = self.calltypes[inst]
                    aline = "%s :: %s" % (inst, atype)
                else:
                    aline = "%s" % inst
                groupedinst[lineno].append("  %s" % aline)
        return groupedinst

    def annotate(self):
        """Return the plain-text annotation: the python source interleaved
        with the typed IR rendered as comments."""
        source = SourceLines(self.func_id.func)
        # if not source.avail:
        #     return "Source code unavailable"

        groupedinst = self.prepare_annotations()

        # Format annotations
        io = StringIO()
        with closing(io):
            if source.avail:
                print("# File: %s" % self.filename, file=io)
                for num in source:
                    srcline = source[num]
                    ind = _getindent(srcline)
                    print("%s# --- LINE %d --- " % (ind, num), file=io)
                    for inst in groupedinst[num]:
                        print('%s# %s' % (ind, inst), file=io)
                    print(file=io)
                    print(srcline, file=io)
                    print(file=io)
                if self.lifted:
                    print("# The function contains lifted loops", file=io)
                    for loop in self.lifted:
                        print("# Loop at line %d" % loop.get_source_location(),
                              file=io)
                        print("# Has %d overloads" % len(loop.overloads),
                              file=io)
                        for cres in loop.overloads.values():
                            print(cres.type_annotation, file=io)
            else:
                print("# Source code unavailable", file=io)
                for num in groupedinst:
                    for inst in groupedinst[num]:
                        print('%s' % (inst,), file=io)
                    print(file=io)

            return io.getvalue()

    def html_annotate(self, outfile):
        """Render the accumulated annotations of all functions as HTML.

        Parameters
        ----------
        outfile : file-like
            Open text file the rendered HTML is written to.

        Raises ImportError if the optional 'jinja2' dependency is missing.
        """
        # ensure that annotation information is assembled
        self.annotate_raw()
        # make a deep copy ahead of the pending mutations
        func_data = copy.deepcopy(self.func_data)

        # Convert numeric indents into runs of '&nbsp;' for HTML rendering.
        key = 'python_indent'
        for this_func in func_data.values():
            if key in this_func:
                idents = {}
                for line, amount in this_func[key].items():
                    idents[line] = '&nbsp;' * amount
                this_func[key] = idents

        key = 'ir_indent'
        for this_func in func_data.values():
            if key in this_func:
                idents = {}
                for line, ir_id in this_func[key].items():
                    idents[line] = ['&nbsp;' * amount for amount in ir_id]
                this_func[key] = idents

        try:
            from jinja2 import Template
        except ImportError:
            raise ImportError("please install the 'jinja2' package")

        root = os.path.join(os.path.dirname(__file__))
        template_filename = os.path.join(root, 'template.html')
        with open(template_filename, 'r') as template:
            html = template.read()

        template = Template(html)
        rendered = template.render(func_data=func_data)
        outfile.write(rendered)

    def annotate_raw(self):
        """
        This returns "raw" annotation information i.e. it has no output format
        specific markup included.
        """
        python_source = SourceLines(self.func_id.func)
        ir_lines = self.prepare_annotations()
        line_nums = [num for num in python_source]
        lifted_lines = [l.get_source_location() for l in self.lifted]

        def add_ir_line(func_data, line):
            # Strip a trailing 'pyobject' marker off the IR line and record it
            # separately as the line's type tag.
            line_str = line.strip()
            line_type = ''
            if line_str.endswith('pyobject'):
                line_str = line_str.replace('pyobject', '')
                line_type = 'pyobject'
            func_data['ir_lines'][num].append((line_str, line_type))
            indent_len = len(_getindent(line))
            func_data['ir_indent'][num].append(indent_len)

        func_key = (self.func_id.filename + ':' +
                    str(self.func_id.firstlineno + 1),
                    self.signature)
        # NOTE(review): if ``lifted_from`` is None and ``func_key`` is already
        # present in ``TypeAnnotation.func_data``, neither branch below binds
        # ``func_data`` before its later use — looks like a latent
        # NameError path; confirm whether that combination can occur.
        if self.lifted_from is not None and self.lifted_from[1]['num_lifted_loops'] > 0:
            # This is a lifted loop function that is being compiled. Get the
            # numba ir for lines in loop function to use for annotating
            # original python function that the loop was lifted from.
            func_data = self.lifted_from[1]
            for num in line_nums:
                if num not in ir_lines.keys():
                    continue
                func_data['ir_lines'][num] = []
                func_data['ir_indent'][num] = []
                for line in ir_lines[num]:
                    add_ir_line(func_data, line)
                    if line.strip().endswith('pyobject'):
                        func_data['python_tags'][num] = 'object_tag'
                        # If any pyobject line is found, make sure original python
                        # line that was marked as a lifted loop start line is tagged
                        # as an object line instead. Lifted loop start lines should
                        # only be marked as lifted loop lines if the lifted loop
                        # was successfully compiled in nopython mode.
                        func_data['python_tags'][self.lifted_from[0]] = 'object_tag'

            # We're done with this lifted loop, so decrement lifted loop counter.
            # When lifted loop counter hits zero, that means we're ready to write
            # out annotations to html file.
            self.lifted_from[1]['num_lifted_loops'] -= 1

        elif func_key not in TypeAnnotation.func_data.keys():
            TypeAnnotation.func_data[func_key] = {}
            func_data = TypeAnnotation.func_data[func_key]

            for i, loop in enumerate(self.lifted):
                # Make sure that when we process each lifted loop function later,
                # we'll know where it originally came from.
                loop.lifted_from = (lifted_lines[i], func_data)
            func_data['num_lifted_loops'] = self.num_lifted_loops

            func_data['filename'] = self.filename
            func_data['funcname'] = self.func_id.func_name
            func_data['python_lines'] = []
            func_data['python_indent'] = {}
            func_data['python_tags'] = {}
            func_data['ir_lines'] = {}
            func_data['ir_indent'] = {}

            for num in line_nums:
                func_data['python_lines'].append((num, python_source[num].strip()))
                indent_len = len(_getindent(python_source[num]))
                func_data['python_indent'][num] = indent_len
                func_data['python_tags'][num] = ''
                func_data['ir_lines'][num] = []
                func_data['ir_indent'][num] = []

                for line in ir_lines[num]:
                    add_ir_line(func_data, line)
                    if num in lifted_lines:
                        func_data['python_tags'][num] = 'lifted_tag'
                    elif line.strip().endswith('pyobject'):
                        func_data['python_tags'][num] = 'object_tag'
        return self.func_data

    def __str__(self):
        return self.annotate()
273
+
274
+
275
+ re_longest_white_prefix = re.compile(r'^\s*')
276
+
277
+
278
+ def _getindent(text):
279
+ m = re_longest_white_prefix.match(text)
280
+ if not m:
281
+ return ''
282
+ else:
283
+ return ' ' * len(m.group(0))
lib/python3.10/site-packages/numba/core/datamodel/__init__.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ from .manager import DataModelManager
2
+ from .packer import ArgPacker, DataPacker
3
+ from .registry import register_default, default_manager, register
4
+ from .models import PrimitiveModel, CompositeModel, StructModel # type: ignore
lib/python3.10/site-packages/numba/core/datamodel/manager.py ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import weakref
2
+ from collections import ChainMap
3
+
4
+ from numba.core import types
5
+
6
+
7
class DataModelManager(object):
    """Manages mapping of FE types to their corresponding data model
    """

    def __init__(self, handlers=None):
        """
        Parameters
        -----------
        handlers: Mapping[Type, DataModel] or None
            Optionally provide the initial handlers mapping.
        """
        # { numba type class -> model factory }
        self._handlers = handlers or {}
        # { numba type instance -> model instance }; weak keys so that cached
        # models do not keep front-end type instances alive.
        self._cache = weakref.WeakKeyDictionary()

    def register(self, fetypecls, handler):
        """Register the datamodel factory corresponding to a frontend-type class
        """
        assert issubclass(fetypecls, types.Type)
        self._handlers[fetypecls] = handler

    def lookup(self, fetype):
        """Returns the corresponding datamodel given the frontend-type instance
        """
        if fetype in self._cache:
            return self._cache[fetype]
        factory = self._handlers[type(fetype)]
        model = factory(self, fetype)
        self._cache[fetype] = model
        return model

    def __getitem__(self, fetype):
        """Shorthand for lookup()
        """
        return self.lookup(fetype)

    def copy(self):
        """
        Make a copy of the manager.
        Use this to inherit from the default data model and specialize it
        for custom target.
        """
        return DataModelManager(self._handlers.copy())

    def chain(self, other_manager):
        """Create a new DataModelManager by chaining the handlers mapping of
        `other_manager` with a fresh handlers mapping.

        Any existing and new handlers inserted to `other_manager` will be
        visible to the new manager. Any handlers inserted to the new manager
        can override existing handlers in `other_manager` without actually
        mutating `other_manager`.

        Parameters
        ----------
        other_manager: DataModelManager
        """
        layered = ChainMap(self._handlers, other_manager._handlers)
        return DataModelManager(layered)
68
+
lib/python3.10/site-packages/numba/core/datamodel/models.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import sys
from numba.core.utils import _RedirectSubpackage
from numba.core import config

# Replace this module object in ``sys.modules`` with a redirecting proxy so
# that ``numba.core.datamodel.models`` transparently resolves to either the
# legacy or the new data-model implementation, chosen by configuration at
# import time.
if config.USE_LEGACY_TYPE_SYSTEM:  # type: ignore
    sys.modules[__name__] = _RedirectSubpackage(
        locals(), "numba.core.datamodel.old_models"
    )
else:
    sys.modules[__name__] = _RedirectSubpackage(
        locals(), "numba.core.datamodel.new_models"
    )
lib/python3.10/site-packages/numba/core/datamodel/new_models.py ADDED
@@ -0,0 +1,1390 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import partial
2
+ from collections import deque
3
+
4
+ from llvmlite import ir
5
+
6
+ from numba.core.datamodel.registry import register_default
7
+ from numba.core import types, cgutils
8
+ from numba.np import numpy_support
9
+
10
+
11
class DataModel(object):
    """
    DataModel describe how a FE type is represented in the LLVM IR at
    different contexts.

    Contexts are:

    - value: representation inside function body. Maybe stored in stack.
      The representation here are flexible.

    - data: representation used when storing into containers (e.g. arrays).

    - argument: representation used for function argument. All composite
      types are unflattened into multiple primitive types.

    - return: representation used for return argument.

    Throughout the compiler pipeline, a LLVM value is usually passed around
    in the "value" representation. All "as_" prefix function converts from
    "value" representation. All "from_" prefix function converts to the
    "value" representation.

    """
    def __init__(self, dmm, fe_type):
        self._dmm = dmm
        self._fe_type = fe_type

    @property
    def fe_type(self):
        # The frontend (Numba) type this model describes.
        return self._fe_type

    def get_value_type(self):
        """Return the LLVM type for the "value" representation."""
        raise NotImplementedError(self)

    def get_data_type(self):
        """Return the LLVM type for the "data" representation.

        Defaults to the value type.
        """
        return self.get_value_type()

    def get_argument_type(self):
        """Return a LLVM type or nested tuple of LLVM type
        """
        return self.get_value_type()

    def get_return_type(self):
        """Return the LLVM type for the "return" representation.

        Defaults to the value type.
        """
        return self.get_value_type()

    def as_data(self, builder, value):
        """Convert a "value" representation to the "data" representation."""
        raise NotImplementedError(self)

    def as_argument(self, builder, value):
        """
        Takes one LLVM value
        Return a LLVM value or nested tuple of LLVM value
        """
        raise NotImplementedError(self)

    def as_return(self, builder, value):
        """Convert a "value" representation to the "return" representation."""
        raise NotImplementedError(self)

    def from_data(self, builder, value):
        """Convert a "data" representation to the "value" representation."""
        raise NotImplementedError(self)

    def from_argument(self, builder, value):
        """
        Takes a LLVM value or nested tuple of LLVM value
        Returns one LLVM value
        """
        raise NotImplementedError(self)

    def from_return(self, builder, value):
        """Convert a "return" representation to the "value" representation."""
        raise NotImplementedError(self)

    def load_from_data_pointer(self, builder, ptr, align=None):
        """
        Load value from a pointer to data.
        This is the default implementation, sufficient for most purposes.
        """
        return self.from_data(builder, builder.load(ptr, align=align))

    def traverse(self, builder):
        """
        Traverse contained members.
        Returns an iterable of contained (types, getters).
        Each getter is a one-argument function accepting a LLVM value.
        """
        return []

    def traverse_models(self):
        """
        Recursively list all models involved in this model.
        """
        return [self._dmm[t] for t in self.traverse_types()]

    def traverse_types(self):
        """
        Recursively list all frontend types involved in this model.
        """
        # Breadth-first walk over inner models, de-duplicating types.
        types = [self._fe_type]
        queue = deque([self])
        while len(queue) > 0:
            dm = queue.popleft()

            for i_dm in dm.inner_models():
                if i_dm._fe_type not in types:
                    queue.append(i_dm)
                    types.append(i_dm._fe_type)

        return types

    def inner_models(self):
        """
        List all *inner* models.
        """
        return []

    def get_nrt_meminfo(self, builder, value):
        """
        Returns the MemInfo object or None if it is not tracked.
        It is only defined for types.meminfo_pointer
        """
        return None

    def has_nrt_meminfo(self):
        # Whether this model itself carries an NRT MemInfo pointer.
        return False

    def contains_nrt_meminfo(self):
        """
        Recursively check all contained types for need for NRT meminfo.
        """
        return any(model.has_nrt_meminfo() for model in self.traverse_models())

    def _compared_fields(self):
        # Identity for equality/hashing: concrete model class + FE type.
        return (type(self), self._fe_type)

    def __hash__(self):
        return hash(tuple(self._compared_fields()))

    def __eq__(self, other):
        if type(self) is type(other):
            return self._compared_fields() == other._compared_fields()
        else:
            return False

    def __ne__(self, other):
        return not self.__eq__(other)
155
+
156
+
157
@register_default(types.Omitted)
class OmittedArgDataModel(DataModel):
    """
    A data model for omitted arguments.  Only the "argument"
    representation is defined; all other representations raise
    NotImplementedError (inherited from DataModel).
    """

    def get_value_type(self):
        # A dummy empty struct: an omitted argument carries no data.
        return ir.LiteralStructType([])

    def get_argument_type(self):
        # No LLVM function argument is produced at all.
        return ()

    def as_argument(self, builder, val):
        return ()

    def from_argument(self, builder, val):
        assert val == (), val
        return None

178
class PrimitiveModel(DataModel):
    """A primitive type is represented natively in the target in every
    usage context, so all conversions below are the identity.
    """

    def __init__(self, dmm, fe_type, be_type):
        super().__init__(dmm, fe_type)
        # Single LLVM backend type shared by all representations.
        self.be_type = be_type

    def get_value_type(self):
        return self.be_type

    # Every representation coincides with the value representation.
    def as_data(self, builder, value):
        return value

    def as_argument(self, builder, value):
        return value

    def as_return(self, builder, value):
        return value

    def from_data(self, builder, value):
        return value

    def from_argument(self, builder, value):
        return value

    def from_return(self, builder, value):
        return value


209
class ProxyModel(DataModel):
    """
    Helper class for models which delegate to another model.

    Subclasses must set ``self._proxied_model`` before use; every
    representation query and conversion is forwarded to it.
    """

    def get_value_type(self):
        return self._proxied_model.get_value_type()

    def get_data_type(self):
        return self._proxied_model.get_data_type()

    def get_return_type(self):
        return self._proxied_model.get_return_type()

    def get_argument_type(self):
        return self._proxied_model.get_argument_type()

    def as_data(self, builder, value):
        return self._proxied_model.as_data(builder, value)

    def as_argument(self, builder, value):
        return self._proxied_model.as_argument(builder, value)

    def as_return(self, builder, value):
        return self._proxied_model.as_return(builder, value)

    def from_data(self, builder, value):
        return self._proxied_model.from_data(builder, value)

    def from_argument(self, builder, value):
        return self._proxied_model.from_argument(builder, value)

    def from_return(self, builder, value):
        return self._proxied_model.from_return(builder, value)


245
@register_default(types.EnumMember)
@register_default(types.IntEnumMember)
class EnumModel(ProxyModel):
    """
    Enum members are represented exactly like their underlying values.
    """

    def __init__(self, dmm, fe_type):
        super().__init__(dmm, fe_type)
        # Delegate every representation to the member value's model.
        self._proxied_model = dmm.lookup(fe_type.dtype)


256
@register_default(types.Opaque)
@register_default(types.PyObject)
@register_default(types.RawPointer)
@register_default(types.NoneType)
@register_default(types.StringLiteral)
@register_default(types.EllipsisType)
@register_default(types.Function)
@register_default(types.Type)
@register_default(types.Object)
@register_default(types.Module)
@register_default(types.Phantom)
@register_default(types.UndefVar)
@register_default(types.ContextManager)
@register_default(types.Dispatcher)
@register_default(types.ObjModeDispatcher)
@register_default(types.ExceptionClass)
@register_default(types.Dummy)
@register_default(types.ExceptionInstance)
@register_default(types.ExternalFunction)
@register_default(types.EnumClass)
@register_default(types.IntEnumClass)
@register_default(types.NumberClass)
@register_default(types.TypeRef)
@register_default(types.NamedTupleClass)
@register_default(types.DType)
@register_default(types.RecursiveCall)
@register_default(types.MakeFunctionLiteral)
@register_default(types.Poison)
class OpaqueModel(PrimitiveModel):
    """
    Passed around as opaque (i8*) pointers.
    """
    _ptr_type = ir.IntType(8).as_pointer()

    def __init__(self, dmm, fe_type):
        super().__init__(dmm, fe_type, self._ptr_type)


295
@register_default(types.MemInfoPointer)
class MemInfoModel(OpaqueModel):
    """Opaque pointer to an NRT MemInfo; the value itself *is* the
    meminfo, so it is always tracked."""

    def inner_models(self):
        return [self._dmm.lookup(self._fe_type.dtype)]

    def has_nrt_meminfo(self):
        return True

    def get_nrt_meminfo(self, builder, value):
        return value


308
@register_default(types.CPointer)
class PointerModel(PrimitiveModel):
    """Plain pointer to the pointee's *data* representation."""

    def __init__(self, dmm, fe_type):
        self._pointee_model = dmm.lookup(fe_type.dtype)
        self._pointee_be_type = self._pointee_model.get_data_type()
        super().__init__(dmm, fe_type, self._pointee_be_type.as_pointer())


@register_default(types.EphemeralPointer)
class EphemeralPointerModel(PointerModel):
    """Pointer whose "data" representation is the pointee itself
    (the pointer is short-lived and never stored)."""

    def get_data_type(self):
        return self._pointee_be_type

    def as_data(self, builder, value):
        # Dereference, then convert the pointee to its data form.
        pointee = builder.load(value)
        return self._pointee_model.as_data(builder, pointee)

    def from_data(self, builder, value):
        raise NotImplementedError("use load_from_data_pointer() instead")

    def load_from_data_pointer(self, builder, ptr, align=None):
        # The data pointer is reused directly as the value.
        return builder.bitcast(ptr, self.get_value_type())


@register_default(types.EphemeralArray)
class EphemeralArrayModel(PointerModel):
    """Pointer to a fixed-count inline array of the pointee type."""

    def __init__(self, dmm, fe_type):
        super().__init__(dmm, fe_type)
        self._data_type = ir.ArrayType(self._pointee_be_type,
                                       self._fe_type.count)

    def get_data_type(self):
        return self._data_type

    def as_data(self, builder, value):
        # Load each element and pack them into one LLVM array value.
        elems = [builder.load(cgutils.gep_inbounds(builder, value, idx))
                 for idx in range(self._fe_type.count)]
        return cgutils.pack_array(builder, elems)

    def from_data(self, builder, value):
        raise NotImplementedError("use load_from_data_pointer() instead")

    def load_from_data_pointer(self, builder, ptr, align=None):
        return builder.bitcast(ptr, self.get_value_type())


@register_default(types.ExternalFunctionPointer)
class ExternalFuncPointerModel(PrimitiveModel):
    """Pointer to a non-Numba C function, typed from its signature."""

    def __init__(self, dmm, fe_type):
        sig = fe_type.sig
        # Since the function is non-Numba, there is no adaptation of
        # arguments and return value, hence get_value_type().
        retty = dmm.lookup(sig.return_type).get_value_type()
        argtys = [dmm.lookup(t).get_value_type() for t in sig.args]
        super().__init__(dmm, fe_type,
                         ir.PointerType(ir.FunctionType(retty, argtys)))


369
@register_default(types.UniTuple)
@register_default(types.NamedUniTuple)
@register_default(types.StarArgUniTuple)
class UniTupleModel(DataModel):
    """Homogeneous tuple: an LLVM array of the element model's types."""

    def __init__(self, dmm, fe_type):
        super().__init__(dmm, fe_type)
        self._elem_model = dmm.lookup(fe_type.dtype)
        self._count = len(fe_type)
        self._value_type = ir.ArrayType(self._elem_model.get_value_type(),
                                        self._count)
        self._data_type = ir.ArrayType(self._elem_model.get_data_type(),
                                       self._count)

    def get_value_type(self):
        return self._value_type

    def get_data_type(self):
        return self._data_type

    def get_return_type(self):
        return self.get_value_type()

    def get_argument_type(self):
        # One argument slot per element.
        return (self._elem_model.get_argument_type(),) * self._count

    def as_argument(self, builder, value):
        # Note: returns a list (flattened later by the argument packer).
        return [self._elem_model.as_argument(
                    builder, builder.extract_value(value, [idx]))
                for idx in range(self._count)]

    def from_argument(self, builder, value):
        result = ir.Constant(self.get_value_type(), ir.Undefined)
        for idx, elem in enumerate(value):
            converted = self._elem_model.from_argument(builder, elem)
            result = builder.insert_value(result, converted, [idx])
        return result

    def as_data(self, builder, value):
        result = ir.Constant(self.get_data_type(), ir.Undefined)
        for idx in range(self._count):
            elem = builder.extract_value(value, [idx])
            converted = self._elem_model.as_data(builder, elem)
            result = builder.insert_value(result, converted, [idx])
        return result

    def from_data(self, builder, value):
        result = ir.Constant(self.get_value_type(), ir.Undefined)
        for idx in range(self._count):
            elem = builder.extract_value(value, [idx])
            converted = self._elem_model.from_data(builder, elem)
            result = builder.insert_value(result, converted, [idx])
        return result

    def as_return(self, builder, value):
        return value

    def from_return(self, builder, value):
        return value

    def traverse(self, builder):
        def getter(idx, value):
            return builder.extract_value(value, idx)
        return [(self._fe_type.dtype, partial(getter, idx))
                for idx in range(self._count)]

    def inner_models(self):
        return [self._elem_model]


441
class CompositeModel(DataModel):
    """Marker base class: any model composed of multiple other models
    should subclass from this.
    """
    pass


448
class StructModel(CompositeModel):
    """
    Model a frontend type as an LLVM struct of named member fields.

    *members* is a sequence of ``(field_name, member_fe_type)`` pairs;
    each member's model is looked up eagerly.  The LLVM value/data struct
    types are built lazily and cached on first use.
    """
    # Lazily-built caches for the LLVM struct types (set per instance).
    _value_type = None
    _data_type = None

    def __init__(self, dmm, fe_type, members):
        super(StructModel, self).__init__(dmm, fe_type)
        if members:
            self._fields, self._members = zip(*members)
        else:
            self._fields = self._members = ()
        self._models = tuple([self._dmm.lookup(t) for t in self._members])

    def get_member_fe_type(self, name):
        """
        StructModel-specific: get the Numba type of the field named *name*.
        """
        pos = self.get_field_position(name)
        return self._members[pos]

    def get_value_type(self):
        if self._value_type is None:
            self._value_type = ir.LiteralStructType([t.get_value_type()
                                                     for t in self._models])
        return self._value_type

    def get_data_type(self):
        if self._data_type is None:
            self._data_type = ir.LiteralStructType([t.get_data_type()
                                                    for t in self._models])
        return self._data_type

    def get_argument_type(self):
        # Nested tuple: one entry per member.
        return tuple([t.get_argument_type() for t in self._models])

    def get_return_type(self):
        return self.get_data_type()

    def _as(self, methname, builder, value):
        """Apply conversion *methname* member-wise; return a tuple."""
        extracted = []
        for i, dm in enumerate(self._models):
            extracted.append(getattr(dm, methname)(builder,
                                                   self.get(builder, value, i)))
        return tuple(extracted)

    def _from(self, methname, builder, value):
        """Rebuild the value struct from member-wise converted *value*."""
        struct = ir.Constant(self.get_value_type(), ir.Undefined)

        for i, (dm, val) in enumerate(zip(self._models, value)):
            v = getattr(dm, methname)(builder, val)
            struct = self.set(builder, struct, v, i)

        return struct

    def as_data(self, builder, value):
        """
        Converts the LLVM struct in `value` into a representation suited for
        storing into arrays.

        Note
        ----
        Current implementation rarely changes how types are represented for
        "value" and "data".  This is usually a pointless rebuild of the
        immutable LLVM struct value.  Luckily, LLVM optimization removes all
        redundancy.

        Sample usecase: Structures nested with pointers to other structures
        that can be serialized into a flat representation when storing into
        array.
        """
        elems = self._as("as_data", builder, value)
        struct = ir.Constant(self.get_data_type(), ir.Undefined)
        for i, el in enumerate(elems):
            struct = builder.insert_value(struct, el, [i])
        return struct

    def from_data(self, builder, value):
        """
        Convert from "data" representation back into "value" representation.
        Usually invoked when loading from array.

        See notes in `as_data()`
        """
        vals = [builder.extract_value(value, [i])
                for i in range(len(self._members))]
        return self._from("from_data", builder, vals)

    def load_from_data_pointer(self, builder, ptr, align=None):
        # Load member-by-member so members with a pointer-based value
        # representation can reuse the data pointer directly.
        values = []
        for i, model in enumerate(self._models):
            elem_ptr = cgutils.gep_inbounds(builder, ptr, 0, i)
            val = model.load_from_data_pointer(builder, elem_ptr, align)
            values.append(val)

        struct = ir.Constant(self.get_value_type(), ir.Undefined)
        for i, val in enumerate(values):
            struct = self.set(builder, struct, val, i)
        return struct

    def as_argument(self, builder, value):
        return self._as("as_argument", builder, value)

    def from_argument(self, builder, value):
        return self._from("from_argument", builder, value)

    def as_return(self, builder, value):
        # Same layout as the "data" representation.
        elems = self._as("as_data", builder, value)
        struct = ir.Constant(self.get_data_type(), ir.Undefined)
        for i, el in enumerate(elems):
            struct = builder.insert_value(struct, el, [i])
        return struct

    def from_return(self, builder, value):
        vals = [builder.extract_value(value, [i])
                for i in range(len(self._members))]
        return self._from("from_data", builder, vals)

    def get(self, builder, val, pos):
        """Get a field at the given position or the fieldname

        Args
        ----
        builder:
            LLVM IRBuilder
        val:
            struct value to read from
        pos: int or str
            field index or field name

        Returns
        -------
        Extracted value
        """
        if isinstance(pos, str):
            pos = self.get_field_position(pos)
        return builder.extract_value(val, [pos],
                                     name="extracted." + self._fields[pos])

    def set(self, builder, stval, val, pos):
        """Set a field at the given position or the fieldname

        Args
        ----
        builder:
            LLVM IRBuilder
        stval:
            LLVM struct value
        val:
            value to be inserted
        pos: int or str
            field index or field name

        Returns
        -------
        A new LLVM struct with the value inserted
        """
        if isinstance(pos, str):
            pos = self.get_field_position(pos)
        return builder.insert_value(stval, val, [pos],
                                    name="inserted." + self._fields[pos])

    def get_field_position(self, field):
        """Return the index of the field named *field*.

        Raises
        ------
        KeyError
            If no such field exists.
        """
        try:
            return self._fields.index(field)
        except ValueError:
            raise KeyError("%s does not have a field named %r"
                           % (self.__class__.__name__, field))

    @property
    def field_count(self):
        return len(self._fields)

    def get_type(self, pos):
        """Get the frontend type (numba type) of a field given the position
        or the fieldname

        Args
        ----
        pos: int or str
            field index or field name
        """
        if isinstance(pos, str):
            pos = self.get_field_position(pos)
        return self._members[pos]

    def get_model(self, pos):
        """
        Get the datamodel of a field given the position or the fieldname.

        Args
        ----
        pos: int or str
            field index or field name
        """
        # Fix: accept a field name here too, as the docstring promises and
        # as the sibling accessors (get/set/get_type) already do.
        if isinstance(pos, str):
            pos = self.get_field_position(pos)
        return self._models[pos]

    def traverse(self, builder):
        def getter(k, value):
            if value.type != self.get_value_type():
                args = self.get_value_type(), value.type
                raise TypeError("expecting {0} but got {1}".format(*args))
            return self.get(builder, value, k)

        return [(self.get_type(k), partial(getter, k)) for k in self._fields]

    def inner_models(self):
        return self._models


656
@register_default(types.PythonBoolean)
@register_default(types.PythonBooleanLiteral)
@register_default(types.NumPyBoolean)
@register_default(types.NumPyBooleanLiteral)
@register_default(types.MachineBoolean)
@register_default(types.MachineBooleanLiteral)
class BooleanModel(DataModel):
    """Booleans are an i1 as a value, but an i8 in storage and at ABI
    boundaries (arguments and returns)."""
    _bit_type = ir.IntType(1)
    _byte_type = ir.IntType(8)

    def get_value_type(self):
        return self._bit_type

    def get_data_type(self):
        return self._byte_type

    def get_return_type(self):
        return self.get_data_type()

    def get_argument_type(self):
        return self.get_data_type()

    def as_data(self, builder, value):
        # Widen i1 -> i8.
        return builder.zext(value, self.get_data_type())

    def as_argument(self, builder, value):
        return self.as_data(builder, value)

    def as_return(self, builder, value):
        return self.as_data(builder, value)

    def from_data(self, builder, value):
        # Normalize: any non-zero byte becomes i1 1, zero becomes i1 0.
        bit_ty = self.get_value_type()
        slot = cgutils.alloca_once(builder, bit_ty)
        is_zero = builder.icmp_unsigned('==', value, value.type(0))
        with builder.if_else(is_zero) as (then, otherwise):
            with then:
                builder.store(bit_ty(0), slot)
            with otherwise:
                builder.store(bit_ty(1), slot)
        return builder.load(slot)

    def from_argument(self, builder, value):
        return self.from_data(builder, value)

    def from_return(self, builder, value):
        return self.from_data(builder, value)


705
@register_default(types.PythonInteger)
@register_default(types.PythonIntegerLiteral)
@register_default(types.NumPyInteger)
@register_default(types.NumPyIntegerLiteral)
@register_default(types.MachineInteger)
@register_default(types.MachineIntegerLiteral)
class IntegerModel(PrimitiveModel):
    """Integers map to an LLVM integer of the frontend bitwidth."""

    def __init__(self, dmm, fe_type):
        super().__init__(dmm, fe_type, ir.IntType(fe_type.bitwidth))


@register_default(types.PythonFloat)
@register_default(types.NumPyFloat)
@register_default(types.MachineFloat)
class FloatModel(PrimitiveModel):
    """Floating-point values lowered as LLVM double.

    NOTE(review): this unconditionally lowers to a 64-bit double;
    confirm that no 32-bit float frontend type is routed through this
    model.
    """

    def __init__(self, dmm, fe_type):
        super().__init__(dmm, fe_type, ir.DoubleType())


@register_default(types.PythonComplex)
@register_default(types.NumPyComplex)
@register_default(types.MachineComplex)
class ComplexModel(StructModel):
    """Complex numbers: a (real, imag) struct of the underlying float."""
    _element_type = NotImplemented

    def __init__(self, dmm, fe_type):
        members = [
            ('real', fe_type.underlying_float),
            ('imag', fe_type.underlying_float),
        ]
        super().__init__(dmm, fe_type, members)


740
@register_default(types.LiteralList)
@register_default(types.LiteralStrKeyDict)
@register_default(types.Tuple)
@register_default(types.NamedTuple)
@register_default(types.StarArgTuple)
class TupleModel(StructModel):
    """Heterogeneous tuple: a struct with one field per element,
    named 'f0', 'f1', ..."""

    def __init__(self, dmm, fe_type):
        members = [('f' + str(i), t) for i, t in enumerate(fe_type)]
        super().__init__(dmm, fe_type, members)


@register_default(types.UnionType)
class UnionModel(StructModel):
    """Tagged union: an integer tag plus a tuple of all payload types."""

    def __init__(self, dmm, fe_type):
        members = [
            ('tag', types.uintp),
            # XXX: it should really be a MemInfoPointer(types.voidptr)
            ('payload', types.Tuple.from_types(fe_type.types)),
        ]
        super().__init__(dmm, fe_type, members)


@register_default(types.Pair)
class PairModel(StructModel):
    """A simple (first, second) pair struct."""

    def __init__(self, dmm, fe_type):
        members = [('first', fe_type.first_type),
                   ('second', fe_type.second_type)]
        super().__init__(dmm, fe_type, members)


770
@register_default(types.ListPayload)
class ListPayloadModel(StructModel):
    """Mutable list payload, always manipulated by reference so that
    mutations of the list are seen by its iterators."""

    def __init__(self, dmm, fe_type):
        members = [
            ('size', types.intp),
            ('allocated', types.intp),
            # Only meaningful for reflected lists.
            ('dirty', types.boolean),
            # Actually an inlined var-sized array.
            ('data', fe_type.container.dtype),
        ]
        super().__init__(dmm, fe_type, members)


@register_default(types.List)
class ListModel(StructModel):
    """List header: a meminfo owning the payload, plus the Python parent
    object for reflected lists."""

    def __init__(self, dmm, fe_type):
        payload_type = types.ListPayload(fe_type)
        members = [
            # The meminfo data points to a ListPayload.
            ('meminfo', types.MemInfoPointer(payload_type)),
            # Only meaningful for reflected lists.
            ('parent', types.pyobject),
        ]
        super().__init__(dmm, fe_type, members)


@register_default(types.ListIter)
class ListIterModel(StructModel):
    """List iterator: shares the payload meminfo with the list and keeps
    a mutable index."""

    def __init__(self, dmm, fe_type):
        payload_type = types.ListPayload(fe_type.container)
        members = [
            # The meminfo data points to a ListPayload (shared with the
            # original list object).
            ('meminfo', types.MemInfoPointer(payload_type)),
            ('index', types.EphemeralPointer(types.intp)),
        ]
        super().__init__(dmm, fe_type, members)


813
@register_default(types.SetEntry)
class SetEntryModel(StructModel):
    """One hash-table entry of a set: (hash, key)."""

    def __init__(self, dmm, fe_type):
        dtype = fe_type.set_type.dtype
        members = [
            # Sentinel hashes: -1 = empty, -2 = deleted.
            ('hash', types.intp),
            ('key', dtype),
        ]
        super().__init__(dmm, fe_type, members)


@register_default(types.SetPayload)
class SetPayloadModel(StructModel):
    """Set payload: open-addressing hash table bookkeeping followed by
    the inlined entries array."""

    def __init__(self, dmm, fe_type):
        entry_type = types.SetEntry(fe_type.container)
        members = [
            # Number of active + deleted entries.
            ('fill', types.intp),
            # Number of active entries.
            ('used', types.intp),
            # Allocated size - 1 (size being a power of 2).
            ('mask', types.intp),
            # Search finger.
            ('finger', types.intp),
            # Only meaningful for reflected sets.
            ('dirty', types.boolean),
            # Actually an inlined var-sized array.
            ('entries', entry_type),
        ]
        super().__init__(dmm, fe_type, members)


@register_default(types.Set)
class SetModel(StructModel):
    """Set header: a meminfo owning the payload, plus the Python parent
    object for reflected sets."""

    def __init__(self, dmm, fe_type):
        payload_type = types.SetPayload(fe_type)
        members = [
            # The meminfo data points to a SetPayload.
            ('meminfo', types.MemInfoPointer(payload_type)),
            # Only meaningful for reflected sets.
            ('parent', types.pyobject),
        ]
        super().__init__(dmm, fe_type, members)


@register_default(types.SetIter)
class SetIterModel(StructModel):
    """Set iterator: shares the payload meminfo with the set and keeps a
    mutable index into the entries table."""

    def __init__(self, dmm, fe_type):
        payload_type = types.SetPayload(fe_type.container)
        members = [
            # The meminfo data points to a SetPayload (shared with the
            # original set object).
            ('meminfo', types.MemInfoPointer(payload_type)),
            # The index into the entries table.
            ('index', types.EphemeralPointer(types.intp)),
        ]
        super().__init__(dmm, fe_type, members)


871
@register_default(types.Array)
@register_default(types.Buffer)
@register_default(types.ByteArray)
@register_default(types.Bytes)
@register_default(types.MemoryView)
@register_default(types.PyArray)
class ArrayModel(StructModel):
    """Array/buffer struct: ownership (meminfo/parent), geometry
    (nitems/itemsize/shape/strides) and the data pointer."""

    def __init__(self, dmm, fe_type):
        ndim = fe_type.ndim
        members = [
            ('meminfo', types.MemInfoPointer(fe_type.dtype)),
            ('parent', types.pyobject),
            ('nitems', types.intp),
            ('itemsize', types.intp),
            ('data', types.CPointer(fe_type.dtype)),
            ('shape', types.UniTuple(types.intp, ndim)),
            ('strides', types.UniTuple(types.intp, ndim)),
        ]
        super().__init__(dmm, fe_type, members)


@register_default(types.ArrayFlags)
class ArrayFlagsModel(StructModel):
    """Flags view: holds only a reference to the parent array."""

    def __init__(self, dmm, fe_type):
        members = [
            ('parent', fe_type.array_type),
        ]
        super().__init__(dmm, fe_type, members)


@register_default(types.NestedArray)
class NestedArrayModel(ArrayModel):
    """Array nested inside a record; adds a flat storage representation."""

    def __init__(self, dmm, fe_type):
        self._be_type = dmm.lookup(fe_type.dtype).get_data_type()
        super().__init__(dmm, fe_type)

    def as_storage_type(self):
        """Return the LLVM type representation for the storage of
        the nestedarray.
        """
        return ir.ArrayType(self._be_type, self._fe_type.nitems)


916
@register_default(types.Optional)
class OptionalModel(StructModel):
    """A wrapped value plus a "valid" flag encoding None-ness.  Returned
    values use the wrapped type's return protocol directly."""

    def __init__(self, dmm, fe_type):
        members = [
            ('data', fe_type.type),
            ('valid', types.boolean),
        ]
        self._value_model = dmm.lookup(fe_type.type)
        super().__init__(dmm, fe_type, members)

    def get_return_type(self):
        return self._value_model.get_return_type()

    def as_return(self, builder, value):
        raise NotImplementedError

    def from_return(self, builder, value):
        return self._value_model.from_return(builder, value)

    def traverse(self, builder):
        def get_valid(value):
            return self.get(builder, value, "valid")

        def get_data(value):
            # Invalid (None) values expose a null data payload.
            valid = get_valid(value)
            data = self.get(builder, value, "data")
            return builder.select(valid, data, ir.Constant(data.type, None))

        return [(self.get_type("data"), get_data),
                (self.get_type("valid"), get_valid)]


947
@register_default(types.Record)
class RecordModel(CompositeModel):
    """Records are passed around as a pointer to their raw bytes
    ([size x i8]*); the "data" form is the byte array itself."""

    def __init__(self, dmm, fe_type):
        super().__init__(dmm, fe_type)
        self._models = [self._dmm.lookup(t) for _, t in fe_type.members]
        self._be_type = ir.ArrayType(ir.IntType(8), fe_type.size)
        self._be_ptr_type = self._be_type.as_pointer()

    def get_value_type(self):
        """Passed around as reference to underlying data."""
        return self._be_ptr_type

    def get_argument_type(self):
        return self._be_ptr_type

    def get_return_type(self):
        return self._be_ptr_type

    def get_data_type(self):
        return self._be_type

    def as_data(self, builder, value):
        return builder.load(value)

    def from_data(self, builder, value):
        raise NotImplementedError("use load_from_data_pointer() instead")

    # The pointer itself already is the argument/return representation.
    def as_argument(self, builder, value):
        return value

    def from_argument(self, builder, value):
        return value

    def as_return(self, builder, value):
        return value

    def from_return(self, builder, value):
        return value

    def load_from_data_pointer(self, builder, ptr, align=None):
        return builder.bitcast(ptr, self.get_value_type())


991
@register_default(types.UnicodeCharSeq)
class UnicodeCharSeq(DataModel):
    """Fixed-size unicode character sequence: an array of unicode code
    units; all representations coincide."""

    def __init__(self, dmm, fe_type):
        super().__init__(dmm, fe_type)
        charty = ir.IntType(numpy_support.sizeof_unicode_char * 8)
        self._be_type = ir.ArrayType(charty, fe_type.count)

    def get_value_type(self):
        return self._be_type

    def get_data_type(self):
        return self._be_type

    # Every conversion is the identity.
    def as_data(self, builder, value):
        return value

    def from_data(self, builder, value):
        return value

    def as_return(self, builder, value):
        return value

    def from_return(self, builder, value):
        return value

    def as_argument(self, builder, value):
        return value

    def from_argument(self, builder, value):
        return value


@register_default(types.CharSeq)
class CharSeq(DataModel):
    """Fixed-size byte character sequence: an [count x i8] array; all
    representations coincide."""

    def __init__(self, dmm, fe_type):
        super().__init__(dmm, fe_type)
        self._be_type = ir.ArrayType(ir.IntType(8), fe_type.count)

    def get_value_type(self):
        return self._be_type

    def get_data_type(self):
        return self._be_type

    # Every conversion is the identity.
    def as_data(self, builder, value):
        return value

    def from_data(self, builder, value):
        return value

    def as_return(self, builder, value):
        return value

    def from_return(self, builder, value):
        return value

    def as_argument(self, builder, value):
        return value

    def from_argument(self, builder, value):
        return value


1055
class CContiguousFlatIter(StructModel):
    """Flat iterator over a C-contiguous array: a single running index
    suffices; per-dimension indices are kept only for ndenumerate()."""

    def __init__(self, dmm, fe_type, need_indices):
        assert fe_type.array_type.layout == 'C'
        array_type = fe_type.array_type
        ndim = array_type.ndim
        members = [('array', array_type),
                   ('stride', types.intp),
                   ('index', types.EphemeralPointer(types.intp)),
                   ]
        if need_indices:
            # For ndenumerate()
            members.append(('indices', types.EphemeralArray(types.intp, ndim)))
        super().__init__(dmm, fe_type, members)


class FlatIter(StructModel):
    """Flat iterator over an arbitrary-layout array: keeps per-dimension
    pointers and indices plus an exhaustion flag."""

    def __init__(self, dmm, fe_type):
        array_type = fe_type.array_type
        dtype = array_type.dtype
        ndim = array_type.ndim
        members = [('array', array_type),
                   ('pointers', types.EphemeralArray(types.CPointer(dtype),
                                                     ndim)),
                   ('indices', types.EphemeralArray(types.intp, ndim)),
                   ('exhausted', types.EphemeralPointer(types.boolean)),
                   ]
        super().__init__(dmm, fe_type, members)


@register_default(types.UniTupleIter)
class UniTupleIter(StructModel):
    """Iterator over a homogeneous tuple: the tuple plus a mutable index."""

    def __init__(self, dmm, fe_type):
        members = [('index', types.EphemeralPointer(types.intp)),
                   ('tuple', fe_type.container,)]
        super().__init__(dmm, fe_type, members)


1092
@register_default(types.misc.SliceLiteral)
@register_default(types.SliceType)
class SliceModel(StructModel):
    """Slice: a (start, stop, step) triple of intp."""

    def __init__(self, dmm, fe_type):
        members = [('start', types.intp),
                   ('stop', types.intp),
                   ('step', types.intp),
                   ]
        super().__init__(dmm, fe_type, members)


@register_default(types.NPDatetime)
@register_default(types.NPTimedelta)
class NPDatetimeModel(PrimitiveModel):
    """NumPy datetime64/timedelta64 are bare 64-bit integers."""

    def __init__(self, dmm, fe_type):
        super().__init__(dmm, fe_type, ir.IntType(64))


@register_default(types.ArrayIterator)
class ArrayIterator(StructModel):
    """Iterator over an array's first dimension."""

    def __init__(self, dmm, fe_type):
        # We use an unsigned index to avoid the cost of negative index
        # tests.
        members = [('index', types.EphemeralPointer(types.uintp)),
                   ('array', fe_type.array_type)]
        super().__init__(dmm, fe_type, members)


@register_default(types.EnumerateType)
class EnumerateType(StructModel):
    """enumerate(): a mutable counter plus the wrapped iterator."""

    def __init__(self, dmm, fe_type):
        members = [('count', types.EphemeralPointer(types.intp)),
                   ('iter', fe_type.source_type)]
        super().__init__(dmm, fe_type, members)


@register_default(types.ZipType)
class ZipType(StructModel):
    """zip(): one iterator field per zipped source."""

    def __init__(self, dmm, fe_type):
        members = [('iter%d' % i, src.iterator_type)
                   for i, src in enumerate(fe_type.source_types)]
        super().__init__(dmm, fe_type, members)


@register_default(types.RangeIteratorType)
class RangeIteratorType(StructModel):
    """range iterator: mutable cursor and remaining count, plus the
    immutable stop/step."""

    def __init__(self, dmm, fe_type):
        int_type = fe_type.yield_type
        members = [('iter', types.EphemeralPointer(int_type)),
                   ('stop', int_type),
                   ('step', int_type),
                   ('count', types.EphemeralPointer(int_type))]
        super().__init__(dmm, fe_type, members)


1148
+ @register_default(types.Generator)
1149
+ class GeneratorModel(CompositeModel):
1150
+ def __init__(self, dmm, fe_type):
1151
+ super(GeneratorModel, self).__init__(dmm, fe_type)
1152
+ # XXX Fold this in DataPacker?
1153
+ self._arg_models = [self._dmm.lookup(t) for t in fe_type.arg_types
1154
+ if not isinstance(t, types.Omitted)]
1155
+ self._state_models = [self._dmm.lookup(t) for t in fe_type.state_types]
1156
+
1157
+ self._args_be_type = ir.LiteralStructType(
1158
+ [t.get_data_type() for t in self._arg_models])
1159
+ self._state_be_type = ir.LiteralStructType(
1160
+ [t.get_data_type() for t in self._state_models])
1161
+ # The whole generator closure
1162
+ self._be_type = ir.LiteralStructType(
1163
+ [self._dmm.lookup(types.int32).get_value_type(),
1164
+ self._args_be_type, self._state_be_type])
1165
+ self._be_ptr_type = self._be_type.as_pointer()
1166
+
1167
+ def get_value_type(self):
1168
+ """
1169
+ The generator closure is passed around as a reference.
1170
+ """
1171
+ return self._be_ptr_type
1172
+
1173
+ def get_argument_type(self):
1174
+ return self._be_ptr_type
1175
+
1176
+ def get_return_type(self):
1177
+ return self._be_type
1178
+
1179
+ def get_data_type(self):
1180
+ return self._be_type
1181
+
1182
+ def as_argument(self, builder, value):
1183
+ return value
1184
+
1185
+ def from_argument(self, builder, value):
1186
+ return value
1187
+
1188
+ def as_return(self, builder, value):
1189
+ return self.as_data(builder, value)
1190
+
1191
+ def from_return(self, builder, value):
1192
+ return self.from_data(builder, value)
1193
+
1194
+ def as_data(self, builder, value):
1195
+ return builder.load(value)
1196
+
1197
+ def from_data(self, builder, value):
1198
+ stack = cgutils.alloca_once(builder, value.type)
1199
+ builder.store(value, stack)
1200
+ return stack
1201
+
1202
+
1203
+ @register_default(types.ArrayCTypes)
1204
+ class ArrayCTypesModel(StructModel):
1205
+ def __init__(self, dmm, fe_type):
1206
+ # ndim = fe_type.ndim
1207
+ members = [('data', types.CPointer(fe_type.dtype)),
1208
+ ('meminfo', types.MemInfoPointer(fe_type.dtype))]
1209
+ super(ArrayCTypesModel, self).__init__(dmm, fe_type, members)
1210
+
1211
+
1212
+ @register_default(types.RangeType)
1213
+ class RangeModel(StructModel):
1214
+ def __init__(self, dmm, fe_type):
1215
+ int_type = fe_type.iterator_type.yield_type
1216
+ members = [('start', int_type),
1217
+ ('stop', int_type),
1218
+ ('step', int_type)]
1219
+ super(RangeModel, self).__init__(dmm, fe_type, members)
1220
+
1221
+
1222
+ # =============================================================================
1223
+
1224
+ @register_default(types.NumpyNdIndexType)
1225
+ class NdIndexModel(StructModel):
1226
+ def __init__(self, dmm, fe_type):
1227
+ ndim = fe_type.ndim
1228
+ members = [('shape', types.UniTuple(types.intp, ndim)),
1229
+ ('indices', types.EphemeralArray(types.intp, ndim)),
1230
+ ('exhausted', types.EphemeralPointer(types.boolean)),
1231
+ ]
1232
+ super(NdIndexModel, self).__init__(dmm, fe_type, members)
1233
+
1234
+
1235
+ @register_default(types.NumpyFlatType)
1236
+ def handle_numpy_flat_type(dmm, ty):
1237
+ if ty.array_type.layout == 'C':
1238
+ return CContiguousFlatIter(dmm, ty, need_indices=False)
1239
+ else:
1240
+ return FlatIter(dmm, ty)
1241
+
1242
+ @register_default(types.NumpyNdEnumerateType)
1243
+ def handle_numpy_ndenumerate_type(dmm, ty):
1244
+ if ty.array_type.layout == 'C':
1245
+ return CContiguousFlatIter(dmm, ty, need_indices=True)
1246
+ else:
1247
+ return FlatIter(dmm, ty)
1248
+
1249
+ @register_default(types.BoundFunction)
1250
+ def handle_bound_function(dmm, ty):
1251
+ # The same as the underlying type
1252
+ return dmm[ty.this]
1253
+
1254
+
1255
+ @register_default(types.NumpyNdIterType)
1256
+ class NdIter(StructModel):
1257
+ def __init__(self, dmm, fe_type):
1258
+ array_types = fe_type.arrays
1259
+ ndim = fe_type.ndim
1260
+ shape_len = ndim if fe_type.need_shaped_indexing else 1
1261
+ members = [('exhausted', types.EphemeralPointer(types.boolean)),
1262
+ ('arrays', types.Tuple(array_types)),
1263
+ # The iterator's main shape and indices
1264
+ ('shape', types.UniTuple(types.intp, shape_len)),
1265
+ ('indices', types.EphemeralArray(types.intp, shape_len)),
1266
+ ]
1267
+ # Indexing state for the various sub-iterators
1268
+ # XXX use a tuple instead?
1269
+ for i, sub in enumerate(fe_type.indexers):
1270
+ kind, start_dim, end_dim, _ = sub
1271
+ member_name = 'index%d' % i
1272
+ if kind == 'flat':
1273
+ # A single index into the flattened array
1274
+ members.append((member_name, types.EphemeralPointer(types.intp)))
1275
+ elif kind in ('scalar', 'indexed', '0d'):
1276
+ # Nothing required
1277
+ pass
1278
+ else:
1279
+ assert 0
1280
+ # Slots holding values of the scalar args
1281
+ # XXX use a tuple instead?
1282
+ for i, ty in enumerate(fe_type.arrays):
1283
+ if not isinstance(ty, types.Array):
1284
+ member_name = 'scalar%d' % i
1285
+ members.append((member_name, types.EphemeralPointer(ty)))
1286
+
1287
+ super(NdIter, self).__init__(dmm, fe_type, members)
1288
+
1289
+
1290
+ @register_default(types.DeferredType)
1291
+ class DeferredStructModel(CompositeModel):
1292
+ def __init__(self, dmm, fe_type):
1293
+ super(DeferredStructModel, self).__init__(dmm, fe_type)
1294
+ self.typename = "deferred.{0}".format(id(fe_type))
1295
+ self.actual_fe_type = fe_type.get()
1296
+
1297
+ def get_value_type(self):
1298
+ return ir.global_context.get_identified_type(self.typename + '.value')
1299
+
1300
+ def get_data_type(self):
1301
+ return ir.global_context.get_identified_type(self.typename + '.data')
1302
+
1303
+ def get_argument_type(self):
1304
+ return self._actual_model.get_argument_type()
1305
+
1306
+ def as_argument(self, builder, value):
1307
+ inner = self.get(builder, value)
1308
+ return self._actual_model.as_argument(builder, inner)
1309
+
1310
+ def from_argument(self, builder, value):
1311
+ res = self._actual_model.from_argument(builder, value)
1312
+ return self.set(builder, self.make_uninitialized(), res)
1313
+
1314
+ def from_data(self, builder, value):
1315
+ self._define()
1316
+ elem = self.get(builder, value)
1317
+ value = self._actual_model.from_data(builder, elem)
1318
+ out = self.make_uninitialized()
1319
+ return self.set(builder, out, value)
1320
+
1321
+ def as_data(self, builder, value):
1322
+ self._define()
1323
+ elem = self.get(builder, value)
1324
+ value = self._actual_model.as_data(builder, elem)
1325
+ out = self.make_uninitialized(kind='data')
1326
+ return self.set(builder, out, value)
1327
+
1328
+ def from_return(self, builder, value):
1329
+ return value
1330
+
1331
+ def as_return(self, builder, value):
1332
+ return value
1333
+
1334
+ def get(self, builder, value):
1335
+ return builder.extract_value(value, [0])
1336
+
1337
+ def set(self, builder, value, content):
1338
+ return builder.insert_value(value, content, [0])
1339
+
1340
+ def make_uninitialized(self, kind='value'):
1341
+ self._define()
1342
+ if kind == 'value':
1343
+ ty = self.get_value_type()
1344
+ else:
1345
+ ty = self.get_data_type()
1346
+ return ir.Constant(ty, ir.Undefined)
1347
+
1348
+ def _define(self):
1349
+ valty = self.get_value_type()
1350
+ self._define_value_type(valty)
1351
+ datty = self.get_data_type()
1352
+ self._define_data_type(datty)
1353
+
1354
+ def _define_value_type(self, value_type):
1355
+ if value_type.is_opaque:
1356
+ value_type.set_body(self._actual_model.get_value_type())
1357
+
1358
+ def _define_data_type(self, data_type):
1359
+ if data_type.is_opaque:
1360
+ data_type.set_body(self._actual_model.get_data_type())
1361
+
1362
+ @property
1363
+ def _actual_model(self):
1364
+ return self._dmm.lookup(self.actual_fe_type)
1365
+
1366
+ def traverse(self, builder):
1367
+ return [(self.actual_fe_type,
1368
+ lambda value: builder.extract_value(value, [0]))]
1369
+
1370
+
1371
+ @register_default(types.StructRefPayload)
1372
+ class StructPayloadModel(StructModel):
1373
+ """Model for the payload of a mutable struct
1374
+ """
1375
+ def __init__(self, dmm, fe_typ):
1376
+ members = tuple(fe_typ.field_dict.items())
1377
+ super().__init__(dmm, fe_typ, members)
1378
+
1379
+
1380
+ class StructRefModel(StructModel):
1381
+ """Model for a mutable struct.
1382
+ A reference to the payload
1383
+ """
1384
+ def __init__(self, dmm, fe_typ):
1385
+ dtype = fe_typ.get_data_type()
1386
+ members = [
1387
+ ("meminfo", types.MemInfoPointer(dtype)),
1388
+ ]
1389
+ super().__init__(dmm, fe_typ, members)
1390
+
lib/python3.10/site-packages/numba/core/datamodel/old_models.py ADDED
@@ -0,0 +1,1385 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import partial
2
+ from collections import deque
3
+
4
+ from llvmlite import ir
5
+
6
+ from numba.core.datamodel.registry import register_default
7
+ from numba.core import types, cgutils
8
+ from numba.np import numpy_support
9
+
10
+
11
+ class DataModel(object):
12
+ """
13
+ DataModel describe how a FE type is represented in the LLVM IR at
14
+ different contexts.
15
+
16
+ Contexts are:
17
+
18
+ - value: representation inside function body. Maybe stored in stack.
19
+ The representation here are flexible.
20
+
21
+ - data: representation used when storing into containers (e.g. arrays).
22
+
23
+ - argument: representation used for function argument. All composite
24
+ types are unflattened into multiple primitive types.
25
+
26
+ - return: representation used for return argument.
27
+
28
+ Throughput the compiler pipeline, a LLVM value is usually passed around
29
+ in the "value" representation. All "as_" prefix function converts from
30
+ "value" representation. All "from_" prefix function converts to the
31
+ "value" representation.
32
+
33
+ """
34
+ def __init__(self, dmm, fe_type):
35
+ self._dmm = dmm
36
+ self._fe_type = fe_type
37
+
38
+ @property
39
+ def fe_type(self):
40
+ return self._fe_type
41
+
42
+ def get_value_type(self):
43
+ raise NotImplementedError(self)
44
+
45
+ def get_data_type(self):
46
+ return self.get_value_type()
47
+
48
+ def get_argument_type(self):
49
+ """Return a LLVM type or nested tuple of LLVM type
50
+ """
51
+ return self.get_value_type()
52
+
53
+ def get_return_type(self):
54
+ return self.get_value_type()
55
+
56
+ def as_data(self, builder, value):
57
+ raise NotImplementedError(self)
58
+
59
+ def as_argument(self, builder, value):
60
+ """
61
+ Takes one LLVM value
62
+ Return a LLVM value or nested tuple of LLVM value
63
+ """
64
+ raise NotImplementedError(self)
65
+
66
+ def as_return(self, builder, value):
67
+ raise NotImplementedError(self)
68
+
69
+ def from_data(self, builder, value):
70
+ raise NotImplementedError(self)
71
+
72
+ def from_argument(self, builder, value):
73
+ """
74
+ Takes a LLVM value or nested tuple of LLVM value
75
+ Returns one LLVM value
76
+ """
77
+ raise NotImplementedError(self)
78
+
79
+ def from_return(self, builder, value):
80
+ raise NotImplementedError(self)
81
+
82
+ def load_from_data_pointer(self, builder, ptr, align=None):
83
+ """
84
+ Load value from a pointer to data.
85
+ This is the default implementation, sufficient for most purposes.
86
+ """
87
+ return self.from_data(builder, builder.load(ptr, align=align))
88
+
89
+ def traverse(self, builder):
90
+ """
91
+ Traverse contained members.
92
+ Returns a iterable of contained (types, getters).
93
+ Each getter is a one-argument function accepting a LLVM value.
94
+ """
95
+ return []
96
+
97
+ def traverse_models(self):
98
+ """
99
+ Recursively list all models involved in this model.
100
+ """
101
+ return [self._dmm[t] for t in self.traverse_types()]
102
+
103
+ def traverse_types(self):
104
+ """
105
+ Recursively list all frontend types involved in this model.
106
+ """
107
+ types = [self._fe_type]
108
+ queue = deque([self])
109
+ while len(queue) > 0:
110
+ dm = queue.popleft()
111
+
112
+ for i_dm in dm.inner_models():
113
+ if i_dm._fe_type not in types:
114
+ queue.append(i_dm)
115
+ types.append(i_dm._fe_type)
116
+
117
+ return types
118
+
119
+ def inner_models(self):
120
+ """
121
+ List all *inner* models.
122
+ """
123
+ return []
124
+
125
+ def get_nrt_meminfo(self, builder, value):
126
+ """
127
+ Returns the MemInfo object or None if it is not tracked.
128
+ It is only defined for types.meminfo_pointer
129
+ """
130
+ return None
131
+
132
+ def has_nrt_meminfo(self):
133
+ return False
134
+
135
+ def contains_nrt_meminfo(self):
136
+ """
137
+ Recursively check all contained types for need for NRT meminfo.
138
+ """
139
+ return any(model.has_nrt_meminfo() for model in self.traverse_models())
140
+
141
+ def _compared_fields(self):
142
+ return (type(self), self._fe_type)
143
+
144
+ def __hash__(self):
145
+ return hash(tuple(self._compared_fields()))
146
+
147
+ def __eq__(self, other):
148
+ if type(self) is type(other):
149
+ return self._compared_fields() == other._compared_fields()
150
+ else:
151
+ return False
152
+
153
+ def __ne__(self, other):
154
+ return not self.__eq__(other)
155
+
156
+
157
+ @register_default(types.Omitted)
158
+ class OmittedArgDataModel(DataModel):
159
+ """
160
+ A data model for omitted arguments. Only the "argument" representation
161
+ is defined, other representations raise a NotImplementedError.
162
+ """
163
+ # Omitted arguments are using a dummy value type
164
+ def get_value_type(self):
165
+ return ir.LiteralStructType([])
166
+
167
+ # Omitted arguments don't produce any LLVM function argument.
168
+ def get_argument_type(self):
169
+ return ()
170
+
171
+ def as_argument(self, builder, val):
172
+ return ()
173
+
174
+ def from_argument(self, builder, val):
175
+ assert val == (), val
176
+ return None
177
+
178
+
179
+ @register_default(types.Boolean)
180
+ @register_default(types.BooleanLiteral)
181
+ class BooleanModel(DataModel):
182
+ _bit_type = ir.IntType(1)
183
+ _byte_type = ir.IntType(8)
184
+
185
+ def get_value_type(self):
186
+ return self._bit_type
187
+
188
+ def get_data_type(self):
189
+ return self._byte_type
190
+
191
+ def get_return_type(self):
192
+ return self.get_data_type()
193
+
194
+ def get_argument_type(self):
195
+ return self.get_data_type()
196
+
197
+ def as_data(self, builder, value):
198
+ return builder.zext(value, self.get_data_type())
199
+
200
+ def as_argument(self, builder, value):
201
+ return self.as_data(builder, value)
202
+
203
+ def as_return(self, builder, value):
204
+ return self.as_data(builder, value)
205
+
206
+ def from_data(self, builder, value):
207
+ ty = self.get_value_type()
208
+ resalloca = cgutils.alloca_once(builder, ty)
209
+ cond = builder.icmp_unsigned('==', value, value.type(0))
210
+ with builder.if_else(cond) as (then, otherwise):
211
+ with then:
212
+ builder.store(ty(0), resalloca)
213
+ with otherwise:
214
+ builder.store(ty(1), resalloca)
215
+ return builder.load(resalloca)
216
+
217
+ def from_argument(self, builder, value):
218
+ return self.from_data(builder, value)
219
+
220
+ def from_return(self, builder, value):
221
+ return self.from_data(builder, value)
222
+
223
+
224
+ class PrimitiveModel(DataModel):
225
+ """A primitive type can be represented natively in the target in all
226
+ usage contexts.
227
+ """
228
+
229
+ def __init__(self, dmm, fe_type, be_type):
230
+ super(PrimitiveModel, self).__init__(dmm, fe_type)
231
+ self.be_type = be_type
232
+
233
+ def get_value_type(self):
234
+ return self.be_type
235
+
236
+ def as_data(self, builder, value):
237
+ return value
238
+
239
+ def as_argument(self, builder, value):
240
+ return value
241
+
242
+ def as_return(self, builder, value):
243
+ return value
244
+
245
+ def from_data(self, builder, value):
246
+ return value
247
+
248
+ def from_argument(self, builder, value):
249
+ return value
250
+
251
+ def from_return(self, builder, value):
252
+ return value
253
+
254
+
255
+ class ProxyModel(DataModel):
256
+ """
257
+ Helper class for models which delegate to another model.
258
+ """
259
+
260
+ def get_value_type(self):
261
+ return self._proxied_model.get_value_type()
262
+
263
+ def get_data_type(self):
264
+ return self._proxied_model.get_data_type()
265
+
266
+ def get_return_type(self):
267
+ return self._proxied_model.get_return_type()
268
+
269
+ def get_argument_type(self):
270
+ return self._proxied_model.get_argument_type()
271
+
272
+ def as_data(self, builder, value):
273
+ return self._proxied_model.as_data(builder, value)
274
+
275
+ def as_argument(self, builder, value):
276
+ return self._proxied_model.as_argument(builder, value)
277
+
278
+ def as_return(self, builder, value):
279
+ return self._proxied_model.as_return(builder, value)
280
+
281
+ def from_data(self, builder, value):
282
+ return self._proxied_model.from_data(builder, value)
283
+
284
+ def from_argument(self, builder, value):
285
+ return self._proxied_model.from_argument(builder, value)
286
+
287
+ def from_return(self, builder, value):
288
+ return self._proxied_model.from_return(builder, value)
289
+
290
+
291
+ @register_default(types.EnumMember)
292
+ @register_default(types.IntEnumMember)
293
+ class EnumModel(ProxyModel):
294
+ """
295
+ Enum members are represented exactly like their values.
296
+ """
297
+ def __init__(self, dmm, fe_type):
298
+ super(EnumModel, self).__init__(dmm, fe_type)
299
+ self._proxied_model = dmm.lookup(fe_type.dtype)
300
+
301
+
302
+ @register_default(types.Opaque)
303
+ @register_default(types.PyObject)
304
+ @register_default(types.RawPointer)
305
+ @register_default(types.NoneType)
306
+ @register_default(types.StringLiteral)
307
+ @register_default(types.EllipsisType)
308
+ @register_default(types.Function)
309
+ @register_default(types.Type)
310
+ @register_default(types.Object)
311
+ @register_default(types.Module)
312
+ @register_default(types.Phantom)
313
+ @register_default(types.UndefVar)
314
+ @register_default(types.ContextManager)
315
+ @register_default(types.Dispatcher)
316
+ @register_default(types.ObjModeDispatcher)
317
+ @register_default(types.ExceptionClass)
318
+ @register_default(types.Dummy)
319
+ @register_default(types.ExceptionInstance)
320
+ @register_default(types.ExternalFunction)
321
+ @register_default(types.EnumClass)
322
+ @register_default(types.IntEnumClass)
323
+ @register_default(types.NumberClass)
324
+ @register_default(types.TypeRef)
325
+ @register_default(types.NamedTupleClass)
326
+ @register_default(types.DType)
327
+ @register_default(types.RecursiveCall)
328
+ @register_default(types.MakeFunctionLiteral)
329
+ @register_default(types.Poison)
330
+ class OpaqueModel(PrimitiveModel):
331
+ """
332
+ Passed as opaque pointers
333
+ """
334
+ _ptr_type = ir.IntType(8).as_pointer()
335
+
336
+ def __init__(self, dmm, fe_type):
337
+ be_type = self._ptr_type
338
+ super(OpaqueModel, self).__init__(dmm, fe_type, be_type)
339
+
340
+
341
+ @register_default(types.MemInfoPointer)
342
+ class MemInfoModel(OpaqueModel):
343
+
344
+ def inner_models(self):
345
+ return [self._dmm.lookup(self._fe_type.dtype)]
346
+
347
+ def has_nrt_meminfo(self):
348
+ return True
349
+
350
+ def get_nrt_meminfo(self, builder, value):
351
+ return value
352
+
353
+
354
+ @register_default(types.Integer)
355
+ @register_default(types.IntegerLiteral)
356
+ class IntegerModel(PrimitiveModel):
357
+ def __init__(self, dmm, fe_type):
358
+ be_type = ir.IntType(fe_type.bitwidth)
359
+ super(IntegerModel, self).__init__(dmm, fe_type, be_type)
360
+
361
+
362
+ @register_default(types.Float)
363
+ class FloatModel(PrimitiveModel):
364
+ def __init__(self, dmm, fe_type):
365
+ if fe_type == types.float32:
366
+ be_type = ir.FloatType()
367
+ elif fe_type == types.float64:
368
+ be_type = ir.DoubleType()
369
+ else:
370
+ raise NotImplementedError(fe_type)
371
+ super(FloatModel, self).__init__(dmm, fe_type, be_type)
372
+
373
+
374
+ @register_default(types.CPointer)
375
+ class PointerModel(PrimitiveModel):
376
+ def __init__(self, dmm, fe_type):
377
+ self._pointee_model = dmm.lookup(fe_type.dtype)
378
+ self._pointee_be_type = self._pointee_model.get_data_type()
379
+ be_type = self._pointee_be_type.as_pointer()
380
+ super(PointerModel, self).__init__(dmm, fe_type, be_type)
381
+
382
+
383
+ @register_default(types.EphemeralPointer)
384
+ class EphemeralPointerModel(PointerModel):
385
+
386
+ def get_data_type(self):
387
+ return self._pointee_be_type
388
+
389
+ def as_data(self, builder, value):
390
+ value = builder.load(value)
391
+ return self._pointee_model.as_data(builder, value)
392
+
393
+ def from_data(self, builder, value):
394
+ raise NotImplementedError("use load_from_data_pointer() instead")
395
+
396
+ def load_from_data_pointer(self, builder, ptr, align=None):
397
+ return builder.bitcast(ptr, self.get_value_type())
398
+
399
+
400
+ @register_default(types.EphemeralArray)
401
+ class EphemeralArrayModel(PointerModel):
402
+
403
+ def __init__(self, dmm, fe_type):
404
+ super(EphemeralArrayModel, self).__init__(dmm, fe_type)
405
+ self._data_type = ir.ArrayType(self._pointee_be_type,
406
+ self._fe_type.count)
407
+
408
+ def get_data_type(self):
409
+ return self._data_type
410
+
411
+ def as_data(self, builder, value):
412
+ values = [builder.load(cgutils.gep_inbounds(builder, value, i))
413
+ for i in range(self._fe_type.count)]
414
+ return cgutils.pack_array(builder, values)
415
+
416
+ def from_data(self, builder, value):
417
+ raise NotImplementedError("use load_from_data_pointer() instead")
418
+
419
+ def load_from_data_pointer(self, builder, ptr, align=None):
420
+ return builder.bitcast(ptr, self.get_value_type())
421
+
422
+
423
+ @register_default(types.ExternalFunctionPointer)
424
+ class ExternalFuncPointerModel(PrimitiveModel):
425
+ def __init__(self, dmm, fe_type):
426
+ sig = fe_type.sig
427
+ # Since the function is non-Numba, there is no adaptation
428
+ # of arguments and return value, hence get_value_type().
429
+ retty = dmm.lookup(sig.return_type).get_value_type()
430
+ args = [dmm.lookup(t).get_value_type() for t in sig.args]
431
+ be_type = ir.PointerType(ir.FunctionType(retty, args))
432
+ super(ExternalFuncPointerModel, self).__init__(dmm, fe_type, be_type)
433
+
434
+
435
+ @register_default(types.UniTuple)
436
+ @register_default(types.NamedUniTuple)
437
+ @register_default(types.StarArgUniTuple)
438
+ class UniTupleModel(DataModel):
439
+ def __init__(self, dmm, fe_type):
440
+ super(UniTupleModel, self).__init__(dmm, fe_type)
441
+ self._elem_model = dmm.lookup(fe_type.dtype)
442
+ self._count = len(fe_type)
443
+ self._value_type = ir.ArrayType(self._elem_model.get_value_type(),
444
+ self._count)
445
+ self._data_type = ir.ArrayType(self._elem_model.get_data_type(),
446
+ self._count)
447
+
448
+ def get_value_type(self):
449
+ return self._value_type
450
+
451
+ def get_data_type(self):
452
+ return self._data_type
453
+
454
+ def get_return_type(self):
455
+ return self.get_value_type()
456
+
457
+ def get_argument_type(self):
458
+ return (self._elem_model.get_argument_type(),) * self._count
459
+
460
+ def as_argument(self, builder, value):
461
+ out = []
462
+ for i in range(self._count):
463
+ v = builder.extract_value(value, [i])
464
+ v = self._elem_model.as_argument(builder, v)
465
+ out.append(v)
466
+ return out
467
+
468
+ def from_argument(self, builder, value):
469
+ out = ir.Constant(self.get_value_type(), ir.Undefined)
470
+ for i, v in enumerate(value):
471
+ v = self._elem_model.from_argument(builder, v)
472
+ out = builder.insert_value(out, v, [i])
473
+ return out
474
+
475
+ def as_data(self, builder, value):
476
+ out = ir.Constant(self.get_data_type(), ir.Undefined)
477
+ for i in range(self._count):
478
+ val = builder.extract_value(value, [i])
479
+ dval = self._elem_model.as_data(builder, val)
480
+ out = builder.insert_value(out, dval, [i])
481
+ return out
482
+
483
+ def from_data(self, builder, value):
484
+ out = ir.Constant(self.get_value_type(), ir.Undefined)
485
+ for i in range(self._count):
486
+ val = builder.extract_value(value, [i])
487
+ dval = self._elem_model.from_data(builder, val)
488
+ out = builder.insert_value(out, dval, [i])
489
+ return out
490
+
491
+ def as_return(self, builder, value):
492
+ return value
493
+
494
+ def from_return(self, builder, value):
495
+ return value
496
+
497
+ def traverse(self, builder):
498
+ def getter(i, value):
499
+ return builder.extract_value(value, i)
500
+ return [(self._fe_type.dtype, partial(getter, i))
501
+ for i in range(self._count)]
502
+
503
+ def inner_models(self):
504
+ return [self._elem_model]
505
+
506
+
507
+ class CompositeModel(DataModel):
508
+ """Any model that is composed of multiple other models should subclass from
509
+ this.
510
+ """
511
+ pass
512
+
513
+
514
+ class StructModel(CompositeModel):
515
+ _value_type = None
516
+ _data_type = None
517
+
518
+ def __init__(self, dmm, fe_type, members):
519
+ super(StructModel, self).__init__(dmm, fe_type)
520
+ if members:
521
+ self._fields, self._members = zip(*members)
522
+ else:
523
+ self._fields = self._members = ()
524
+ self._models = tuple([self._dmm.lookup(t) for t in self._members])
525
+
526
+ def get_member_fe_type(self, name):
527
+ """
528
+ StructModel-specific: get the Numba type of the field named *name*.
529
+ """
530
+ pos = self.get_field_position(name)
531
+ return self._members[pos]
532
+
533
+ def get_value_type(self):
534
+ if self._value_type is None:
535
+ self._value_type = ir.LiteralStructType([t.get_value_type()
536
+ for t in self._models])
537
+ return self._value_type
538
+
539
+ def get_data_type(self):
540
+ if self._data_type is None:
541
+ self._data_type = ir.LiteralStructType([t.get_data_type()
542
+ for t in self._models])
543
+ return self._data_type
544
+
545
+ def get_argument_type(self):
546
+ return tuple([t.get_argument_type() for t in self._models])
547
+
548
+ def get_return_type(self):
549
+ return self.get_data_type()
550
+
551
+ def _as(self, methname, builder, value):
552
+ extracted = []
553
+ for i, dm in enumerate(self._models):
554
+ extracted.append(getattr(dm, methname)(builder,
555
+ self.get(builder, value, i)))
556
+ return tuple(extracted)
557
+
558
+ def _from(self, methname, builder, value):
559
+ struct = ir.Constant(self.get_value_type(), ir.Undefined)
560
+
561
+ for i, (dm, val) in enumerate(zip(self._models, value)):
562
+ v = getattr(dm, methname)(builder, val)
563
+ struct = self.set(builder, struct, v, i)
564
+
565
+ return struct
566
+
567
+ def as_data(self, builder, value):
568
+ """
569
+ Converts the LLVM struct in `value` into a representation suited for
570
+ storing into arrays.
571
+
572
+ Note
573
+ ----
574
+ Current implementation rarely changes how types are represented for
575
+ "value" and "data". This is usually a pointless rebuild of the
576
+ immutable LLVM struct value. Luckily, LLVM optimization removes all
577
+ redundancy.
578
+
579
+ Sample usecase: Structures nested with pointers to other structures
580
+ that can be serialized into a flat representation when storing into
581
+ array.
582
+ """
583
+ elems = self._as("as_data", builder, value)
584
+ struct = ir.Constant(self.get_data_type(), ir.Undefined)
585
+ for i, el in enumerate(elems):
586
+ struct = builder.insert_value(struct, el, [i])
587
+ return struct
588
+
589
+ def from_data(self, builder, value):
590
+ """
591
+ Convert from "data" representation back into "value" representation.
592
+ Usually invoked when loading from array.
593
+
594
+ See notes in `as_data()`
595
+ """
596
+ vals = [builder.extract_value(value, [i])
597
+ for i in range(len(self._members))]
598
+ return self._from("from_data", builder, vals)
599
+
600
+ def load_from_data_pointer(self, builder, ptr, align=None):
601
+ values = []
602
+ for i, model in enumerate(self._models):
603
+ elem_ptr = cgutils.gep_inbounds(builder, ptr, 0, i)
604
+ val = model.load_from_data_pointer(builder, elem_ptr, align)
605
+ values.append(val)
606
+
607
+ struct = ir.Constant(self.get_value_type(), ir.Undefined)
608
+ for i, val in enumerate(values):
609
+ struct = self.set(builder, struct, val, i)
610
+ return struct
611
+
612
+ def as_argument(self, builder, value):
613
+ return self._as("as_argument", builder, value)
614
+
615
+ def from_argument(self, builder, value):
616
+ return self._from("from_argument", builder, value)
617
+
618
+ def as_return(self, builder, value):
619
+ elems = self._as("as_data", builder, value)
620
+ struct = ir.Constant(self.get_data_type(), ir.Undefined)
621
+ for i, el in enumerate(elems):
622
+ struct = builder.insert_value(struct, el, [i])
623
+ return struct
624
+
625
+ def from_return(self, builder, value):
626
+ vals = [builder.extract_value(value, [i])
627
+ for i in range(len(self._members))]
628
+ return self._from("from_data", builder, vals)
629
+
630
+ def get(self, builder, val, pos):
631
+ """Get a field at the given position or the fieldname
632
+
633
+ Args
634
+ ----
635
+ builder:
636
+ LLVM IRBuilder
637
+ val:
638
+ value to be inserted
639
+ pos: int or str
640
+ field index or field name
641
+
642
+ Returns
643
+ -------
644
+ Extracted value
645
+ """
646
+ if isinstance(pos, str):
647
+ pos = self.get_field_position(pos)
648
+ return builder.extract_value(val, [pos],
649
+ name="extracted." + self._fields[pos])
650
+
651
+ def set(self, builder, stval, val, pos):
652
+ """Set a field at the given position or the fieldname
653
+
654
+ Args
655
+ ----
656
+ builder:
657
+ LLVM IRBuilder
658
+ stval:
659
+ LLVM struct value
660
+ val:
661
+ value to be inserted
662
+ pos: int or str
663
+ field index or field name
664
+
665
+ Returns
666
+ -------
667
+ A new LLVM struct with the value inserted
668
+ """
669
+ if isinstance(pos, str):
670
+ pos = self.get_field_position(pos)
671
+ return builder.insert_value(stval, val, [pos],
672
+ name="inserted." + self._fields[pos])
673
+
674
+ def get_field_position(self, field):
675
+ try:
676
+ return self._fields.index(field)
677
+ except ValueError:
678
+ raise KeyError("%s does not have a field named %r"
679
+ % (self.__class__.__name__, field))
680
+
681
+ @property
682
+ def field_count(self):
683
+ return len(self._fields)
684
+
685
+ def get_type(self, pos):
686
+ """Get the frontend type (numba type) of a field given the position
687
+ or the fieldname
688
+
689
+ Args
690
+ ----
691
+ pos: int or str
692
+ field index or field name
693
+ """
694
+ if isinstance(pos, str):
695
+ pos = self.get_field_position(pos)
696
+ return self._members[pos]
697
+
698
+ def get_model(self, pos):
699
+ """
700
+ Get the datamodel of a field given the position or the fieldname.
701
+
702
+ Args
703
+ ----
704
+ pos: int or str
705
+ field index or field name
706
+ """
707
+ return self._models[pos]
708
+
709
+ def traverse(self, builder):
710
+ def getter(k, value):
711
+ if value.type != self.get_value_type():
712
+ args = self.get_value_type(), value.type
713
+ raise TypeError("expecting {0} but got {1}".format(*args))
714
+ return self.get(builder, value, k)
715
+
716
+ return [(self.get_type(k), partial(getter, k)) for k in self._fields]
717
+
718
+ def inner_models(self):
719
+ return self._models
720
+
721
+
722
@register_default(types.Complex)
class ComplexModel(StructModel):
    """Model a complex number as a {real, imag} struct of its
    underlying float type.
    """
    _element_type = NotImplemented

    def __init__(self, dmm, fe_type):
        flt = fe_type.underlying_float
        super().__init__(dmm, fe_type, [('real', flt), ('imag', flt)])
732
+
733
+
734
@register_default(types.LiteralList)
@register_default(types.LiteralStrKeyDict)
@register_default(types.Tuple)
@register_default(types.NamedTuple)
@register_default(types.StarArgTuple)
class TupleModel(StructModel):
    """Model a heterogeneous tuple as a struct with one field per
    element, named 'f0', 'f1', ...
    """
    def __init__(self, dmm, fe_type):
        fields = [('f' + str(idx), elem) for idx, elem in enumerate(fe_type)]
        super().__init__(dmm, fe_type, fields)
743
+
744
+
745
@register_default(types.UnionType)
class UnionModel(StructModel):
    """Model a union as a type tag plus a tuple able to hold every
    possible payload type.
    """
    def __init__(self, dmm, fe_type):
        payload = types.Tuple.from_types(fe_type.types)
        super().__init__(dmm, fe_type, [
            ('tag', types.uintp),
            # XXX: it should really be a MemInfoPointer(types.voidptr)
            ('payload', payload),
        ])
754
+
755
+
756
+
757
@register_default(types.Pair)
class PairModel(StructModel):
    """Model a Pair as a simple two-field struct."""
    def __init__(self, dmm, fe_type):
        super().__init__(dmm, fe_type,
                         [('first', fe_type.first_type),
                          ('second', fe_type.second_type)])
763
+
764
+
765
@register_default(types.ListPayload)
class ListPayloadModel(StructModel):
    """Model the heap-allocated payload of a list.

    The fields are mutable but the payload is always manipulated by
    reference; this scheme allows mutations of a list to be seen by
    its iterators.
    """
    def __init__(self, dmm, fe_type):
        super().__init__(dmm, fe_type, [
            ('size', types.intp),
            ('allocated', types.intp),
            # Only used for reflected lists
            ('dirty', types.boolean),
            # Actually an inlined var-sized array
            ('data', fe_type.container.dtype),
        ])
780
+
781
+
782
@register_default(types.List)
class ListModel(StructModel):
    """Model a list as a meminfo reference to its payload, plus the
    parent pyobject used only for reflected lists.
    """
    def __init__(self, dmm, fe_type):
        payload_type = types.ListPayload(fe_type)
        super().__init__(dmm, fe_type, [
            # The meminfo data points to a ListPayload
            ('meminfo', types.MemInfoPointer(payload_type)),
            # Only used for reflected lists
            ('parent', types.pyobject),
        ])
793
+
794
+
795
@register_default(types.ListIter)
class ListIterModel(StructModel):
    """Model a list iterator: a reference to the list payload plus a
    mutable index slot.
    """
    def __init__(self, dmm, fe_type):
        payload_type = types.ListPayload(fe_type.container)
        super().__init__(dmm, fe_type, [
            # Shares the ListPayload with the original list object
            ('meminfo', types.MemInfoPointer(payload_type)),
            ('index', types.EphemeralPointer(types.intp)),
        ])
806
+
807
+
808
@register_default(types.SetEntry)
class SetEntryModel(StructModel):
    """Model one hash-table entry of a set: cached hash plus the key."""
    def __init__(self, dmm, fe_type):
        key_type = fe_type.set_type.dtype
        super().__init__(dmm, fe_type, [
            # -1 = empty, -2 = deleted
            ('hash', types.intp),
            ('key', key_type),
        ])
818
+
819
+
820
@register_default(types.SetPayload)
class SetPayloadModel(StructModel):
    """Model the heap-allocated payload of a set: an open-addressing
    hash table header followed by its inline entries.
    """
    def __init__(self, dmm, fe_type):
        entry_type = types.SetEntry(fe_type.container)
        super().__init__(dmm, fe_type, [
            # Number of active + deleted entries
            ('fill', types.intp),
            # Number of active entries
            ('used', types.intp),
            # Allocated size - 1 (size being a power of 2)
            ('mask', types.intp),
            # Search finger
            ('finger', types.intp),
            # Only used for reflected sets
            ('dirty', types.boolean),
            # Actually an inlined var-sized array
            ('entries', entry_type),
        ])
839
+
840
@register_default(types.Set)
class SetModel(StructModel):
    """Model a set as a meminfo reference to its payload, plus the
    parent pyobject used only for reflected sets.
    """
    def __init__(self, dmm, fe_type):
        payload_type = types.SetPayload(fe_type)
        super().__init__(dmm, fe_type, [
            # The meminfo data points to a SetPayload
            ('meminfo', types.MemInfoPointer(payload_type)),
            # Only used for reflected sets
            ('parent', types.pyobject),
        ])
851
+
852
@register_default(types.SetIter)
class SetIterModel(StructModel):
    """Model a set iterator: a reference to the set payload plus a
    mutable index into its entries table.
    """
    def __init__(self, dmm, fe_type):
        payload_type = types.SetPayload(fe_type.container)
        super().__init__(dmm, fe_type, [
            # Shares the SetPayload with the original set object
            ('meminfo', types.MemInfoPointer(payload_type)),
            # The index into the entries table
            ('index', types.EphemeralPointer(types.intp)),
        ])
864
+
865
+
866
@register_default(types.Array)
@register_default(types.Buffer)
@register_default(types.ByteArray)
@register_default(types.Bytes)
@register_default(types.MemoryView)
@register_default(types.PyArray)
class ArrayModel(StructModel):
    """Model a buffer-like object with the standard array struct:
    meminfo/parent bookkeeping plus data pointer, shape and strides.
    """
    def __init__(self, dmm, fe_type):
        ndim = fe_type.ndim
        super().__init__(dmm, fe_type, [
            ('meminfo', types.MemInfoPointer(fe_type.dtype)),
            ('parent', types.pyobject),
            ('nitems', types.intp),
            ('itemsize', types.intp),
            ('data', types.CPointer(fe_type.dtype)),
            ('shape', types.UniTuple(types.intp, ndim)),
            ('strides', types.UniTuple(types.intp, ndim)),
        ])
886
+
887
+
888
@register_default(types.ArrayFlags)
class ArrayFlagsModel(StructModel):
    """Model array `.flags` as a struct holding just the parent array."""
    def __init__(self, dmm, fe_type):
        super().__init__(dmm, fe_type, [('parent', fe_type.array_type)])
895
+
896
+
897
@register_default(types.NestedArray)
class NestedArrayModel(ArrayModel):
    """Model an array nested inside a record; its storage is an inline
    fixed-size array of the element data type.
    """
    def __init__(self, dmm, fe_type):
        self._be_type = dmm.lookup(fe_type.dtype).get_data_type()
        super().__init__(dmm, fe_type)

    def as_storage_type(self):
        """Return the LLVM type representation for the storage of
        the nestedarray.
        """
        return ir.ArrayType(self._be_type, self._fe_type.nitems)
909
+
910
+
911
@register_default(types.Optional)
class OptionalModel(StructModel):
    """Model an Optional as the wrapped value plus a validity flag."""
    def __init__(self, dmm, fe_type):
        self._value_model = dmm.lookup(fe_type.type)
        super().__init__(dmm, fe_type, [
            ('data', fe_type.type),
            ('valid', types.boolean),
        ])

    def get_return_type(self):
        # Returned exactly as the wrapped type would be.
        return self._value_model.get_return_type()

    def as_return(self, builder, value):
        raise NotImplementedError

    def from_return(self, builder, value):
        return self._value_model.from_return(builder, value)

    def traverse(self, builder):
        def get_valid(value):
            return self.get(builder, value, "valid")

        def get_data(value):
            # When invalid, substitute a null-ish constant so traversal
            # never observes garbage data.
            data = self.get(builder, value, "data")
            return builder.select(get_valid(value), data,
                                  ir.Constant(data.type, None))

        return [(self.get_type("data"), get_data),
                (self.get_type("valid"), get_valid)]
940
+
941
+
942
@register_default(types.Record)
class RecordModel(CompositeModel):
    """Model a record as an opaque byte array, always passed around by
    pointer to the underlying bytes.
    """
    def __init__(self, dmm, fe_type):
        super().__init__(dmm, fe_type)
        self._models = [self._dmm.lookup(t) for _, t in fe_type.members]
        self._be_type = ir.ArrayType(ir.IntType(8), fe_type.size)
        self._be_ptr_type = self._be_type.as_pointer()

    def get_value_type(self):
        """Passed around as reference to underlying data"""
        return self._be_ptr_type

    def get_argument_type(self):
        return self._be_ptr_type

    def get_return_type(self):
        return self._be_ptr_type

    def get_data_type(self):
        return self._be_type

    def as_data(self, builder, value):
        # Dereference to obtain the raw byte array.
        return builder.load(value)

    def from_data(self, builder, value):
        raise NotImplementedError("use load_from_data_pointer() instead")

    # Argument and return representations are the pointer itself.
    def as_argument(self, builder, value):
        return value

    def from_argument(self, builder, value):
        return value

    def as_return(self, builder, value):
        return value

    def from_return(self, builder, value):
        return value

    def load_from_data_pointer(self, builder, ptr, align=None):
        return builder.bitcast(ptr, self.get_value_type())
984
+
985
+
986
@register_default(types.UnicodeCharSeq)
class UnicodeCharSeq(DataModel):
    """Model a fixed-length unicode character sequence as an LLVM array
    of code units; all representations coincide.
    """
    def __init__(self, dmm, fe_type):
        super().__init__(dmm, fe_type)
        char_type = ir.IntType(numpy_support.sizeof_unicode_char * 8)
        self._be_type = ir.ArrayType(char_type, fe_type.count)

    def get_value_type(self):
        return self._be_type

    def get_data_type(self):
        return self._be_type

    # Every representation is the same array value, so all conversions
    # are the identity.
    def as_data(self, builder, value):
        return value

    def from_data(self, builder, value):
        return value

    def as_return(self, builder, value):
        return value

    def from_return(self, builder, value):
        return value

    def as_argument(self, builder, value):
        return value

    def from_argument(self, builder, value):
        return value
1016
+
1017
+
1018
@register_default(types.CharSeq)
class CharSeq(DataModel):
    """Model a fixed-length byte character sequence as an LLVM array of
    i8; all representations coincide.
    """
    def __init__(self, dmm, fe_type):
        super().__init__(dmm, fe_type)
        self._be_type = ir.ArrayType(ir.IntType(8), fe_type.count)

    def get_value_type(self):
        return self._be_type

    def get_data_type(self):
        return self._be_type

    # Every representation is the same array value, so all conversions
    # are the identity.
    def as_data(self, builder, value):
        return value

    def from_data(self, builder, value):
        return value

    def as_return(self, builder, value):
        return value

    def from_return(self, builder, value):
        return value

    def as_argument(self, builder, value):
        return value

    def from_argument(self, builder, value):
        return value
1048
+
1049
+
1050
class CContiguousFlatIter(StructModel):
    """Model the flat iterator over a C-contiguous array.

    A single flat index suffices for C layout; ndenumerate()
    additionally needs the per-dimension indices.
    """
    def __init__(self, dmm, fe_type, need_indices):
        assert fe_type.array_type.layout == 'C'
        array_type = fe_type.array_type
        # NOTE: the unused local `dtype = array_type.dtype` from the
        # original was removed; it had no effect.
        ndim = array_type.ndim
        members = [('array', array_type),
                   ('stride', types.intp),
                   ('index', types.EphemeralPointer(types.intp)),
                   ]
        if need_indices:
            # For ndenumerate()
            members.append(('indices', types.EphemeralArray(types.intp, ndim)))
        super().__init__(dmm, fe_type, members)
1064
+
1065
+
1066
class FlatIter(StructModel):
    """Model the generic flat iterator over a non-contiguous array:
    per-dimension data pointers and indices plus an exhausted flag.
    """
    def __init__(self, dmm, fe_type):
        array_type = fe_type.array_type
        dtype = array_type.dtype
        ndim = array_type.ndim
        super().__init__(dmm, fe_type, [
            ('array', array_type),
            ('pointers', types.EphemeralArray(types.CPointer(dtype), ndim)),
            ('indices', types.EphemeralArray(types.intp, ndim)),
            ('exhausted', types.EphemeralPointer(types.boolean)),
        ])
1077
+
1078
+
1079
@register_default(types.UniTupleIter)
class UniTupleIter(StructModel):
    """Model a homogeneous-tuple iterator: mutable index + the tuple."""
    def __init__(self, dmm, fe_type):
        super().__init__(dmm, fe_type, [
            ('index', types.EphemeralPointer(types.intp)),
            ('tuple', fe_type.container,),
        ])
1085
+
1086
+
1087
@register_default(types.misc.SliceLiteral)
@register_default(types.SliceType)
class SliceModel(StructModel):
    """Model a slice as a (start, stop, step) triple of intp."""
    def __init__(self, dmm, fe_type):
        super().__init__(dmm, fe_type,
                         [(name, types.intp)
                          for name in ('start', 'stop', 'step')])
1096
+
1097
+
1098
@register_default(types.NPDatetime)
@register_default(types.NPTimedelta)
class NPDatetimeModel(PrimitiveModel):
    """Model NumPy datetime64/timedelta64 values as plain 64-bit ints."""
    def __init__(self, dmm, fe_type):
        super().__init__(dmm, fe_type, ir.IntType(64))
1104
+
1105
+
1106
@register_default(types.ArrayIterator)
class ArrayIterator(StructModel):
    """Model an array iterator: mutable index plus the array itself."""
    def __init__(self, dmm, fe_type):
        # We use an unsigned index to avoid the cost of negative index tests.
        super().__init__(dmm, fe_type, [
            ('index', types.EphemeralPointer(types.uintp)),
            ('array', fe_type.array_type),
        ])
1113
+
1114
+
1115
@register_default(types.EnumerateType)
class EnumerateType(StructModel):
    """Model enumerate() state: a mutable counter plus the source
    iterator.
    """
    def __init__(self, dmm, fe_type):
        super().__init__(dmm, fe_type, [
            ('count', types.EphemeralPointer(types.intp)),
            ('iter', fe_type.source_type),
        ])
1122
+
1123
+
1124
@register_default(types.ZipType)
class ZipType(StructModel):
    """Model zip() state as one iterator field per zipped source."""
    def __init__(self, dmm, fe_type):
        fields = [('iter%d' % idx, src.iterator_type)
                  for idx, src in enumerate(fe_type.source_types)]
        super().__init__(dmm, fe_type, fields)
1130
+
1131
+
1132
@register_default(types.RangeIteratorType)
class RangeIteratorType(StructModel):
    """Model a range iterator: mutable cursor and remaining count plus
    the immutable stop/step.
    """
    def __init__(self, dmm, fe_type):
        int_type = fe_type.yield_type
        super().__init__(dmm, fe_type, [
            ('iter', types.EphemeralPointer(int_type)),
            ('stop', int_type),
            ('step', int_type),
            ('count', types.EphemeralPointer(int_type)),
        ])
1141
+
1142
+
1143
@register_default(types.Generator)
class GeneratorModel(CompositeModel):
    """Model a generator closure: a resume index, the saved arguments
    and the saved state, passed around by pointer.
    """
    def __init__(self, dmm, fe_type):
        super().__init__(dmm, fe_type)
        # XXX Fold this in DataPacker?
        self._arg_models = [self._dmm.lookup(t) for t in fe_type.arg_types
                            if not isinstance(t, types.Omitted)]
        self._state_models = [self._dmm.lookup(t)
                              for t in fe_type.state_types]

        self._args_be_type = ir.LiteralStructType(
            [t.get_data_type() for t in self._arg_models])
        self._state_be_type = ir.LiteralStructType(
            [t.get_data_type() for t in self._state_models])
        # The whole generator closure: (resume index, args, state)
        self._be_type = ir.LiteralStructType(
            [self._dmm.lookup(types.int32).get_value_type(),
             self._args_be_type, self._state_be_type])
        self._be_ptr_type = self._be_type.as_pointer()

    def get_value_type(self):
        """
        The generator closure is passed around as a reference.
        """
        return self._be_ptr_type

    def get_argument_type(self):
        return self._be_ptr_type

    def get_return_type(self):
        return self._be_type

    def get_data_type(self):
        return self._be_type

    def as_argument(self, builder, value):
        return value

    def from_argument(self, builder, value):
        return value

    def as_return(self, builder, value):
        return self.as_data(builder, value)

    def from_return(self, builder, value):
        return self.from_data(builder, value)

    def as_data(self, builder, value):
        return builder.load(value)

    def from_data(self, builder, value):
        # Spill into a stack slot so a reference can be handed out.
        slot = cgutils.alloca_once(builder, value.type)
        builder.store(value, slot)
        return slot
1196
+
1197
+
1198
@register_default(types.ArrayCTypes)
class ArrayCTypesModel(StructModel):
    """Model array `.ctypes`: the raw data pointer plus the meminfo
    keeping it alive.
    """
    def __init__(self, dmm, fe_type):
        super().__init__(dmm, fe_type, [
            ('data', types.CPointer(fe_type.dtype)),
            ('meminfo', types.MemInfoPointer(fe_type.dtype)),
        ])
1205
+
1206
+
1207
@register_default(types.RangeType)
class RangeModel(StructModel):
    """Model a range object as (start, stop, step)."""
    def __init__(self, dmm, fe_type):
        int_type = fe_type.iterator_type.yield_type
        super().__init__(dmm, fe_type,
                         [(name, int_type)
                          for name in ('start', 'stop', 'step')])
1215
+
1216
+
1217
+ # =============================================================================
1218
+
1219
@register_default(types.NumpyNdIndexType)
class NdIndexModel(StructModel):
    """Model np.ndindex state: shape, current indices, exhausted flag."""
    def __init__(self, dmm, fe_type):
        ndim = fe_type.ndim
        super().__init__(dmm, fe_type, [
            ('shape', types.UniTuple(types.intp, ndim)),
            ('indices', types.EphemeralArray(types.intp, ndim)),
            ('exhausted', types.EphemeralPointer(types.boolean)),
        ])
1228
+
1229
+
1230
@register_default(types.NumpyFlatType)
def handle_numpy_flat_type(dmm, ty):
    """Pick the flat-iterator model: C-contiguous arrays use the fast
    single-index iterator, other layouts the generic one.
    """
    if ty.array_type.layout == 'C':
        return CContiguousFlatIter(dmm, ty, need_indices=False)
    return FlatIter(dmm, ty)
1236
+
1237
@register_default(types.NumpyNdEnumerateType)
def handle_numpy_ndenumerate_type(dmm, ty):
    """Like handle_numpy_flat_type, but the C-contiguous iterator also
    tracks per-dimension indices for ndenumerate().
    """
    if ty.array_type.layout == 'C':
        return CContiguousFlatIter(dmm, ty, need_indices=True)
    return FlatIter(dmm, ty)
1243
+
1244
@register_default(types.BoundFunction)
def handle_bound_function(dmm, ty):
    """A bound function is modeled exactly like its underlying type."""
    return dmm[ty.this]
1248
+
1249
+
1250
@register_default(types.NumpyNdIterType)
class NdIter(StructModel):
    """Model np.nditer state over several (possibly broadcast)
    operands, with per-sub-iterator indexing state and slots for
    scalar operands.
    """
    def __init__(self, dmm, fe_type):
        array_types = fe_type.arrays
        ndim = fe_type.ndim
        shape_len = ndim if fe_type.need_shaped_indexing else 1
        members = [('exhausted', types.EphemeralPointer(types.boolean)),
                   ('arrays', types.Tuple(array_types)),
                   # The iterator's main shape and indices
                   ('shape', types.UniTuple(types.intp, shape_len)),
                   ('indices', types.EphemeralArray(types.intp, shape_len)),
                   ]
        # Indexing state for the various sub-iterators
        # XXX use a tuple instead?
        for i, sub in enumerate(fe_type.indexers):
            kind, start_dim, end_dim, _ = sub
            member_name = 'index%d' % i
            if kind == 'flat':
                # A single index into the flattened array
                members.append((member_name,
                                types.EphemeralPointer(types.intp)))
            elif kind in ('scalar', 'indexed', '0d'):
                # Nothing required
                pass
            else:
                assert 0
        # Slots holding values of the scalar args
        # XXX use a tuple instead?
        for i, ty in enumerate(fe_type.arrays):
            if not isinstance(ty, types.Array):
                members.append(('scalar%d' % i, types.EphemeralPointer(ty)))

        super().__init__(dmm, fe_type, members)
1283
+
1284
+
1285
@register_default(types.DeferredType)
class DeferredStructModel(CompositeModel):
    """Model a deferred type through uniquely-named identified LLVM
    structs that wrap the actual type's representation once resolved.
    """
    def __init__(self, dmm, fe_type):
        super().__init__(dmm, fe_type)
        self.typename = "deferred.{0}".format(id(fe_type))
        self.actual_fe_type = fe_type.get()

    def get_value_type(self):
        return ir.global_context.get_identified_type(self.typename + '.value')

    def get_data_type(self):
        return ir.global_context.get_identified_type(self.typename + '.data')

    def get_argument_type(self):
        return self._actual_model.get_argument_type()

    def as_argument(self, builder, value):
        inner = self.get(builder, value)
        return self._actual_model.as_argument(builder, inner)

    def from_argument(self, builder, value):
        res = self._actual_model.from_argument(builder, value)
        return self.set(builder, self.make_uninitialized(), res)

    def from_data(self, builder, value):
        self._define()
        inner = self._actual_model.from_data(builder, self.get(builder, value))
        return self.set(builder, self.make_uninitialized(), inner)

    def as_data(self, builder, value):
        self._define()
        inner = self._actual_model.as_data(builder, self.get(builder, value))
        return self.set(builder, self.make_uninitialized(kind='data'), inner)

    def from_return(self, builder, value):
        return value

    def as_return(self, builder, value):
        return value

    def get(self, builder, value):
        # The wrapper struct has a single element holding the payload.
        return builder.extract_value(value, [0])

    def set(self, builder, value, content):
        return builder.insert_value(value, content, [0])

    def make_uninitialized(self, kind='value'):
        self._define()
        ty = self.get_value_type() if kind == 'value' else self.get_data_type()
        return ir.Constant(ty, ir.Undefined)

    def _define(self):
        # Fill in the bodies of the opaque identified structs on first use.
        self._define_value_type(self.get_value_type())
        self._define_data_type(self.get_data_type())

    def _define_value_type(self, value_type):
        if value_type.is_opaque:
            value_type.set_body(self._actual_model.get_value_type())

    def _define_data_type(self, data_type):
        if data_type.is_opaque:
            data_type.set_body(self._actual_model.get_data_type())

    @property
    def _actual_model(self):
        return self._dmm.lookup(self.actual_fe_type)

    def traverse(self, builder):
        return [(self.actual_fe_type,
                 lambda value: builder.extract_value(value, [0]))]
1364
+
1365
+
1366
@register_default(types.StructRefPayload)
class StructPayloadModel(StructModel):
    """Model for the payload of a mutable struct."""
    def __init__(self, dmm, fe_typ):
        super().__init__(dmm, fe_typ, tuple(fe_typ.field_dict.items()))
1373
+
1374
+
1375
class StructRefModel(StructModel):
    """Model for a mutable struct: a meminfo reference to its payload."""
    def __init__(self, dmm, fe_typ):
        payload_type = fe_typ.get_data_type()
        super().__init__(dmm, fe_typ, [
            ("meminfo", types.MemInfoPointer(payload_type)),
        ])
1385
+
lib/python3.10/site-packages/numba/core/datamodel/packer.py ADDED
@@ -0,0 +1,213 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from collections import deque
2
+
3
+ from numba.core import types, cgutils
4
+
5
+
6
+
7
class DataPacker(object):
    """
    A helper to pack a number of typed arguments into a data structure.
    Omitted arguments (i.e. values with the type `Omitted`) are automatically
    skipped.
    """
    # XXX should DataPacker be a model for a dedicated type?

    def __init__(self, dmm, fe_types):
        self._dmm = dmm
        self._fe_types = fe_types
        self._models = [dmm.lookup(ty) for ty in fe_types]

        # Positions of the non-omitted arguments, and their data types.
        self._pack_map = [i for i, ty in enumerate(fe_types)
                          if not isinstance(ty, types.Omitted)]
        self._be_types = [self._models[i].get_data_type()
                          for i in self._pack_map]

    def as_data(self, builder, values):
        """
        Return the given values packed as a data structure.
        """
        elems = [self._models[i].as_data(builder, values[i])
                 for i in self._pack_map]
        return cgutils.make_anonymous_struct(builder, elems)

    def _do_load(self, builder, ptr, formal_list=None):
        # Either collect (type, value) pairs, or scatter values into
        # *formal_list* at their formal argument positions.
        res = []
        for i, i_formal in enumerate(self._pack_map):
            elem_ptr = cgutils.gep_inbounds(builder, ptr, 0, i)
            val = self._models[i_formal].load_from_data_pointer(builder,
                                                                elem_ptr)
            if formal_list is None:
                res.append((self._fe_types[i_formal], val))
            else:
                formal_list[i_formal] = val
        return res

    def load(self, builder, ptr):
        """
        Load the packed values and return a (type, value) tuples.
        """
        return self._do_load(builder, ptr)

    def load_into(self, builder, ptr, formal_list):
        """
        Load the packed values into a sequence indexed by formal
        argument number (skipping any Omitted position).
        """
        self._do_load(builder, ptr, formal_list)
58
+
59
+
60
class ArgPacker(object):
    """
    Compute the position for each high-level typed argument.
    It flattens every composite argument into primitive types.
    It maintains a position map for unflattening the arguments.

    Since struct (esp. nested struct) have specific ABI requirements (e.g.
    alignment, pointer address-space, ...) in different architecture (e.g.
    OpenCL, CUDA), flattening composite argument types simplifes the call
    setup from the Python side. Functions are receiving simple primitive
    types and there are only a handful of these.
    """

    def __init__(self, dmm, fe_args):
        self._dmm = dmm
        self._fe_args = fe_args
        self._nargs = len(fe_args)

        self._dm_args = [self._dmm.lookup(ty) for ty in fe_args]
        argtys = [dm.get_argument_type() for dm in self._dm_args]
        self._unflattener = _Unflattener(argtys)
        self._be_args = list(_flatten(argtys))

    def as_arguments(self, builder, values):
        """Flatten all argument values
        """
        if len(values) != self._nargs:
            raise TypeError("invalid number of args: expected %d, got %d"
                            % (self._nargs, len(values)))

        if not values:
            return ()

        converted = [dm.as_argument(builder, val)
                     for dm, val in zip(self._dm_args, values)]
        return tuple(_flatten(converted))

    def from_arguments(self, builder, args):
        """Unflatten all argument values
        """
        valtree = self._unflattener.unflatten(args)
        return [dm.from_argument(builder, val)
                for dm, val in zip(self._dm_args, valtree)]

    def assign_names(self, args, names):
        """Assign names for each flattened argument values.
        """
        valtree = self._unflattener.unflatten(args)
        for aval, aname in zip(valtree, names):
            self._assign_names(aval, aname)

    def _assign_names(self, val_or_nested, name, depth=()):
        # Recurse into nested sequences, suffixing the name with the
        # positional path, e.g. "arg.0.2".
        if isinstance(val_or_nested, (tuple, list)):
            for pos, aval in enumerate(val_or_nested):
                self._assign_names(aval, name, depth=depth + (pos,))
        else:
            postfix = '.'.join(map(str, depth))
            val_or_nested.name = '.'.join(filter(bool, [name, postfix]))

    @property
    def argument_types(self):
        """Return a list of LLVM types that are results of flattening
        composite types.
        """
        return tuple(ty for ty in self._be_args if ty != ())
138
+
139
+
140
+ def _flatten(iterable):
141
+ """
142
+ Flatten nested iterable of (tuple, list).
143
+ """
144
+ def rec(iterable):
145
+ for i in iterable:
146
+ if isinstance(i, (tuple, list)):
147
+ for j in rec(i):
148
+ yield j
149
+ else:
150
+ yield i
151
+ return rec(iterable)
152
+
153
+
154
+ _PUSH_LIST = 1
155
+ _APPEND_NEXT_VALUE = 2
156
+ _APPEND_EMPTY_TUPLE = 3
157
+ _POP = 4
158
+
159
+ class _Unflattener(object):
160
+ """
161
+ An object used to unflatten nested sequences after a given pattern
162
+ (an arbitrarily nested sequence).
163
+ The pattern shows the nested sequence shape desired when unflattening;
164
+ the values it contains are irrelevant.
165
+ """
166
+
167
+ def __init__(self, pattern):
168
+ self._code = self._build_unflatten_code(pattern)
169
+
170
+ def _build_unflatten_code(self, iterable):
171
+ """Build the unflatten opcode sequence for the given *iterable* structure
172
+ (an iterable of nested sequences).
173
+ """
174
+ code = []
175
+ def rec(iterable):
176
+ for i in iterable:
177
+ if isinstance(i, (tuple, list)):
178
+ if len(i) > 0:
179
+ code.append(_PUSH_LIST)
180
+ rec(i)
181
+ code.append(_POP)
182
+ else:
183
+ code.append(_APPEND_EMPTY_TUPLE)
184
+ else:
185
+ code.append(_APPEND_NEXT_VALUE)
186
+
187
+ rec(iterable)
188
+ return code
189
+
190
+ def unflatten(self, flatiter):
191
+ """Rebuild a nested tuple structure.
192
+ """
193
+ vals = deque(flatiter)
194
+
195
+ res = []
196
+ cur = res
197
+ stack = []
198
+ for op in self._code:
199
+ if op is _PUSH_LIST:
200
+ stack.append(cur)
201
+ cur.append([])
202
+ cur = cur[-1]
203
+ elif op is _APPEND_NEXT_VALUE:
204
+ cur.append(vals.popleft())
205
+ elif op is _APPEND_EMPTY_TUPLE:
206
+ cur.append(())
207
+ elif op is _POP:
208
+ cur = stack.pop()
209
+
210
+ assert not stack, stack
211
+ assert not vals, vals
212
+
213
+ return res
lib/python3.10/site-packages/numba/core/datamodel/registry.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import functools
2
+ from .manager import DataModelManager
3
+
4
+
5
def register(dmm, typecls):
    """Used as decorator to simplify datamodel registration.
    Returns the object being decorated so that chaining is possible.

    *dmm* is the DataModelManager to register on; *typecls* is the
    frontend type class the decorated model handles.
    """
    def decorator(model_cls):
        dmm.register(typecls, model_cls)
        return model_cls

    return decorator
14
+
15
+
16
# Process-wide manager used when no explicit DataModelManager is given.
default_manager = DataModelManager()

# Shorthand decorator that registers a model on the default manager.
register_default = functools.partial(register, default_manager)
lib/python3.10/site-packages/numba/core/datamodel/testing.py ADDED
@@ -0,0 +1,150 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from llvmlite import ir
2
+ from llvmlite import binding as ll
3
+
4
+ from numba.core import datamodel
5
+ import unittest
6
+
7
+
8
class DataModelTester(unittest.TestCase):
    """
    Test the implementation of a DataModel for a frontend type.

    Subclasses set *fe_type* to the frontend type under test; the data
    model is looked up from the default manager in setUp().
    """
    fe_type = NotImplemented

    def setUp(self):
        # Fresh LLVM module per test; the datamodel for fe_type is
        # resolved through the default registry.
        self.module = ir.Module()
        self.datamodel = datamodel.default_manager[self.fe_type]

    def test_as_arg(self):
        """
        - Is as_arg() and from_arg() implemented?
        - Are they the inverse of each other?
        """
        # Build a void function to host the generated instructions.
        fnty = ir.FunctionType(ir.VoidType(), [])
        function = ir.Function(self.module, fnty, name="test_as_arg")
        builder = ir.IRBuilder()
        builder.position_at_end(function.append_basic_block())

        undef_value = ir.Constant(self.datamodel.get_value_type(), None)
        args = self.datamodel.as_argument(builder, undef_value)
        self.assertIsNot(args, NotImplemented, "as_argument returned "
                         "NotImplementedError")

        if isinstance(args, (tuple, list)):
            # Argument was flattened into a (possibly nested) sequence:
            # compare the nested type structure against
            # get_argument_type().
            def recur_tuplize(args, func=None):
                # Mirror the nesting of *args* as tuples, optionally
                # mapping *func* over the leaves.
                for arg in args:
                    if isinstance(arg, (tuple, list)):
                        yield tuple(recur_tuplize(arg, func=func))
                    else:
                        if func is None:
                            yield arg
                        else:
                            yield func(arg)

            argtypes = tuple(recur_tuplize(args, func=lambda x: x.type))
            exptypes = tuple(recur_tuplize(
                self.datamodel.get_argument_type()))
            self.assertEqual(exptypes, argtypes)
        else:
            # Scalar argument: direct type comparison.
            self.assertEqual(args.type,
                             self.datamodel.get_argument_type())

        # Round-trip back to the value representation.
        rev_value = self.datamodel.from_argument(builder, args)
        self.assertEqual(rev_value.type, self.datamodel.get_value_type())

        builder.ret_void()  # end function

        # Ensure valid LLVM generation
        materialized = ll.parse_assembly(str(self.module))
        str(materialized)

    def test_as_return(self):
        """
        - Is as_return() and from_return() implemented?
        - Are they the inverse of each other?
        """
        # Build a void function to host the generated instructions.
        fnty = ir.FunctionType(ir.VoidType(), [])
        function = ir.Function(self.module, fnty, name="test_as_return")
        builder = ir.IRBuilder()
        builder.position_at_end(function.append_basic_block())

        undef_value = ir.Constant(self.datamodel.get_value_type(), None)
        ret = self.datamodel.as_return(builder, undef_value)
        self.assertIsNot(ret, NotImplemented, "as_return returned "
                         "NotImplementedError")

        self.assertEqual(ret.type, self.datamodel.get_return_type())

        # Round-trip back to the value representation.
        rev_value = self.datamodel.from_return(builder, ret)
        self.assertEqual(rev_value.type, self.datamodel.get_value_type())

        builder.ret_void()  # end function

        # Ensure valid LLVM generation
        materialized = ll.parse_assembly(str(self.module))
        str(materialized)
86
+
87
+
88
class SupportAsDataMixin(object):
    """Test as_data() and from_data()

    Mixin for DataModelTester subclasses whose model supports a "data"
    representation.
    """
    # XXX test load_from_data_pointer() as well

    def test_as_data(self):
        # Verify as_data()/from_data() are implemented and round-trip
        # between the value type and the data type.
        fnty = ir.FunctionType(ir.VoidType(), [])
        function = ir.Function(self.module, fnty, name="test_as_data")
        builder = ir.IRBuilder()
        builder.position_at_end(function.append_basic_block())

        undef_value = ir.Constant(self.datamodel.get_value_type(), None)
        data = self.datamodel.as_data(builder, undef_value)
        self.assertIsNot(data, NotImplemented,
                         "as_data returned NotImplemented")

        self.assertEqual(data.type, self.datamodel.get_data_type())

        rev_value = self.datamodel.from_data(builder, data)
        self.assertEqual(rev_value.type,
                         self.datamodel.get_value_type())

        builder.ret_void()  # end function

        # Ensure valid LLVM generation
        materialized = ll.parse_assembly(str(self.module))
        str(materialized)
115
+
116
+
117
class NotSupportAsDataMixin(object):
    """Ensure as_data() and from_data() raise NotImplementedError.

    Mixin for DataModelTester subclasses whose model has no "data"
    representation.
    """

    def test_as_data_not_supported(self):
        # Build a minimal function so an IRBuilder is available.
        fnty = ir.FunctionType(ir.VoidType(), [])
        function = ir.Function(self.module, fnty, name="test_as_data")
        builder = ir.IRBuilder()
        builder.position_at_end(function.append_basic_block())

        undef_value = ir.Constant(self.datamodel.get_value_type(), None)
        with self.assertRaises(NotImplementedError):
            data = self.datamodel.as_data(builder, undef_value)
        with self.assertRaises(NotImplementedError):
            rev_data = self.datamodel.from_data(builder, undef_value)
132
+
133
+
134
class DataModelTester_SupportAsDataMixin(DataModelTester,
                                         SupportAsDataMixin):
    # Concrete TestCase: the model must support as_data()/from_data().
    pass


class DataModelTester_NotSupportAsDataMixin(DataModelTester,
                                            NotSupportAsDataMixin):
    # Concrete TestCase: as_data()/from_data() must raise
    # NotImplementedError.
    pass
142
+
143
+
144
def test_factory(support_as_data=True):
    """A helper for returning a unittest TestCase for testing.

    *support_as_data* selects whether the returned case requires the
    model to implement the data representation.
    """
    return (DataModelTester_SupportAsDataMixin
            if support_as_data
            else DataModelTester_NotSupportAsDataMixin)
lib/python3.10/site-packages/numba/core/rewrites/__init__.py ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ """
2
+ A subpackage hosting Numba IR rewrite passes.
3
+ """
4
+
5
+ from .registry import register_rewrite, rewrite_registry, Rewrite
6
+ # Register various built-in rewrite passes
7
+ from numba.core.rewrites import (static_getitem, static_raise, static_binop,
8
+ ir_print)
lib/python3.10/site-packages/numba/core/rewrites/ir_print.py ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from numba.core import errors, ir
2
+ from numba.core.rewrites import register_rewrite, Rewrite
3
+
4
+
5
@register_rewrite('before-inference')
class RewritePrintCalls(Rewrite):
    """
    Rewrite calls to the print() global function to dedicated IR print() nodes.
    """

    def match(self, func_ir, block, typemap, calltypes):
        self.prints = {}
        self.block = block
        # Scan assignments whose right-hand side is a call to the
        # print() builtin.
        for inst in block.find_insts(ir.Assign):
            rhs = inst.value
            if not (isinstance(rhs, ir.Expr) and rhs.op == 'call'):
                continue
            try:
                callee = func_ir.infer_constant(rhs.func)
            except errors.ConstantInferenceError:
                continue
            if callee is not print:
                continue
            if rhs.kws:
                # Only positional args are supported
                msg = ("Numba's print() function implementation does not "
                       "support keyword arguments.")
                raise errors.UnsupportedError(msg, inst.loc)
            self.prints[inst] = rhs
        return bool(self.prints)

    def apply(self):
        """
        Rewrite `var = call <print function>(...)` as a sequence of
        `print(...)` and `var = const(None)`.
        """
        rewritten = self.block.copy()
        rewritten.clear()
        for inst in self.block.body:
            expr = self.prints.get(inst)
            if expr is None:
                rewritten.append(inst)
            else:
                # Emit the print node, then bind None to the original
                # assignment target.
                rewritten.append(ir.Print(args=expr.args,
                                          vararg=expr.vararg,
                                          loc=expr.loc))
                rewritten.append(ir.Assign(value=ir.Const(None, loc=expr.loc),
                                           target=inst.target,
                                           loc=inst.loc))
        return rewritten
51
+
52
+
53
@register_rewrite('before-inference')
class DetectConstPrintArguments(Rewrite):
    """
    Detect and store constant arguments to print() nodes.
    """

    def match(self, func_ir, block, typemap, calltypes):
        self.consts = {}
        self.block = block
        for print_node in block.find_insts(ir.Print):
            if print_node.consts:
                # Already rewritten
                continue
            for pos, arg in enumerate(print_node.args):
                try:
                    value = func_ir.infer_constant(arg)
                except errors.ConstantInferenceError:
                    continue
                self.consts.setdefault(print_node, {})[pos] = value

        return bool(self.consts)

    def apply(self):
        """
        Store detected constant arguments on their nodes.
        """
        for inst in self.block.body:
            detected = self.consts.get(inst)
            if detected is not None:
                inst.consts = detected
        return self.block
lib/python3.10/site-packages/numba/core/rewrites/registry.py ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from collections import defaultdict
2
+
3
+ from numba.core import config
4
+
5
+
6
class Rewrite(object):
    '''Defines the abstract base class for Numba rewrites.
    '''

    def __init__(self, state=None):
        '''Constructor for the Rewrite class.

        The base class keeps no state; *state* is accepted so subclasses
        constructed by RewriteRegistry.apply() can use it.
        '''
        pass

    def match(self, func_ir, block, typemap, calltypes):
        '''Overload this method to check an IR block for matching terms in the
        rewrite.
        '''
        # The base implementation never matches.
        return False

    def apply(self):
        '''Overload this method to return a rewritten IR basic block when a
        match has been found.
        '''
        raise NotImplementedError("Abstract Rewrite.apply() called!")
26
+
27
+
28
class RewriteRegistry(object):
    '''Defines a registry for Numba rewrites.
    '''
    # The only pipeline phases a rewrite may be registered for.
    _kinds = frozenset(['before-inference', 'after-inference'])

    def __init__(self):
        '''Constructor for the rewrite registry. Initializes the rewrites
        member to an empty list.
        '''
        # Maps kind -> list of registered Rewrite subclasses.
        self.rewrites = defaultdict(list)

    def register(self, kind):
        """
        Decorator adding a subclass of Rewrite to the registry for
        the given *kind*.
        """
        if kind not in self._kinds:
            raise KeyError("invalid kind %r" % (kind,))
        def do_register(rewrite_cls):
            if not issubclass(rewrite_cls, Rewrite):
                raise TypeError('{0} is not a subclass of Rewrite'.format(
                    rewrite_cls))
            self.rewrites[kind].append(rewrite_cls)
            return rewrite_cls
        return do_register

    def apply(self, kind, state):
        '''Given a pipeline and a dictionary of basic blocks, exhaustively
        attempt to apply all registered rewrites to all basic blocks.
        '''
        assert kind in self._kinds
        blocks = state.func_ir.blocks
        # Snapshot so changed blocks can be verified afterwards.
        old_blocks = blocks.copy()
        for rewrite_cls in self.rewrites[kind]:
            # Exhaustively apply a rewrite until it stops matching.
            rewrite = rewrite_cls(state)
            work_list = list(blocks.items())
            while work_list:
                key, block = work_list.pop()
                matches = rewrite.match(state.func_ir, block, state.typemap,
                                        state.calltypes)
                if matches:
                    if config.DEBUG or config.DUMP_IR:
                        print("_" * 70)
                        print("REWRITING (%s):" % rewrite_cls.__name__)
                        block.dump()
                        print("_" * 60)
                    new_block = rewrite.apply()
                    blocks[key] = new_block
                    # Re-queue the rewritten block: the rewrite is
                    # re-attempted until it no longer matches.
                    work_list.append((key, new_block))
                    if config.DEBUG or config.DUMP_IR:
                        new_block.dump()
                        print("_" * 70)
        # If any blocks were changed, perform a sanity check.
        for key, block in blocks.items():
            if block != old_blocks[key]:
                block.verify()

        # Some passes, e.g. _inline_const_arraycall are known to occasionally
        # do invalid things WRT ir.Del, others, e.g. RewriteArrayExprs do valid
        # things with ir.Del, but the placement is not optimal. The lines below
        # fix-up the IR so that ref counts are valid and optimally placed,
        # see #4093 for context. This has to be run here opposed to in
        # apply() as the CFG needs computing so full IR is needed.
        from numba.core import postproc
        post_proc = postproc.PostProcessor(state.func_ir)
        post_proc.run()
95
+
96
+
97
# Singleton registry instance and the decorator other modules import.
rewrite_registry = RewriteRegistry()
register_rewrite = rewrite_registry.register
lib/python3.10/site-packages/numba/core/rewrites/static_binop.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from numba.core import errors, ir
2
+ from numba.core.rewrites import register_rewrite, Rewrite
3
+
4
+
5
@register_rewrite('before-inference')
class DetectStaticBinops(Rewrite):
    """
    Detect constant arguments to select binops.
    """

    # Those operators can benefit from a constant-inferred argument
    rhs_operators = {'**'}

    def match(self, func_ir, block, typemap, calltypes):
        self.static_lhs = {}
        self.static_rhs = {}
        self.block = block
        # Find binop expressions with a constant lhs or rhs
        for expr in block.find_exprs(op='binop'):
            try:
                wants_rhs = (expr.fn in self.rhs_operators
                             and expr.static_rhs is ir.UNDEFINED)
                if wants_rhs:
                    self.static_rhs[expr] = func_ir.infer_constant(expr.rhs)
            except errors.ConstantInferenceError:
                continue

        return bool(self.static_lhs) or bool(self.static_rhs)

    def apply(self):
        """
        Store constant arguments that were detected in match().
        """
        for expr, const_rhs in self.static_rhs.items():
            expr.static_rhs = const_rhs
        return self.block
lib/python3.10/site-packages/numba/core/rewrites/static_getitem.py ADDED
@@ -0,0 +1,175 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from numba.core import errors, ir, types
2
+ from numba.core.rewrites import register_rewrite, Rewrite
3
+
4
+
5
@register_rewrite('before-inference')
class RewriteConstGetitems(Rewrite):
    """
    Rewrite IR expressions of the kind `getitem(value=arr, index=$constXX)`
    where `$constXX` is a known constant as
    `static_getitem(value=arr, index=<constant value>)`.
    """

    def match(self, func_ir, block, typemap, calltypes):
        self.getitems = {}
        self.block = block
        # Detect all getitem expressions and find which ones can be
        # rewritten
        for expr in block.find_exprs(op='getitem'):
            if expr.op != 'getitem':
                continue
            try:
                self.getitems[expr] = func_ir.infer_constant(expr.index)
            except errors.ConstantInferenceError:
                pass

        return bool(self.getitems)

    def apply(self):
        """
        Rewrite all matching getitems as static_getitems.
        """
        rewritten = self.block.copy()
        rewritten.clear()
        for inst in self.block.body:
            if isinstance(inst, ir.Assign) and inst.value in self.getitems:
                expr = inst.value
                const = self.getitems[expr]
                # Keep the original index variable alongside the
                # constant index.
                new_expr = ir.Expr.static_getitem(value=expr.value,
                                                  index=const,
                                                  index_var=expr.index,
                                                  loc=expr.loc)
                inst = ir.Assign(value=new_expr, target=inst.target,
                                 loc=inst.loc)
            rewritten.append(inst)
        return rewritten
47
+
48
+
49
@register_rewrite('after-inference')
class RewriteStringLiteralGetitems(Rewrite):
    """
    Rewrite IR expressions of the kind `getitem(value=arr, index=$XX)`
    where `$XX` is a StringLiteral value as
    `static_getitem(value=arr, index=<literal value>)`.
    """

    def match(self, func_ir, block, typemap, calltypes):
        """
        Detect all getitem expressions and find which ones have
        string literal indexes
        """
        self.getitems = {}
        self.block = block
        self.calltypes = calltypes
        for expr in block.find_exprs(op='getitem'):
            if expr.op != 'getitem':
                continue
            index_ty = typemap[expr.index.name]
            if isinstance(index_ty, types.StringLiteral):
                self.getitems[expr] = (expr.index, index_ty.literal_value)

        return bool(self.getitems)

    def apply(self):
        """
        Rewrite all matching getitems as static_getitems where the index
        is the literal value of the string.
        """
        out_block = ir.Block(self.block.scope, self.block.loc)
        for inst in self.block.body:
            if isinstance(inst, ir.Assign) and inst.value in self.getitems:
                expr = inst.value
                _, lit_val = self.getitems[expr]
                new_expr = ir.Expr.static_getitem(value=expr.value,
                                                  index=lit_val,
                                                  index_var=expr.index,
                                                  loc=expr.loc)
                # Carry the recorded call type over to the new expression.
                self.calltypes[new_expr] = self.calltypes[expr]
                inst = ir.Assign(value=new_expr, target=inst.target,
                                 loc=inst.loc)
            out_block.append(inst)
        return out_block
93
+
94
+
95
@register_rewrite('after-inference')
class RewriteStringLiteralSetitems(Rewrite):
    """
    Rewrite IR expressions of the kind `setitem(value=arr, index=$XX, value=)`
    where `$XX` is a StringLiteral value as
    `static_setitem(value=arr, index=<literal value>, value=)`.
    """

    def match(self, func_ir, block, typemap, calltypes):
        """
        Detect all setitem expressions and find which ones have
        string literal indexes
        """
        self.setitems = {}
        self.block = block
        self.calltypes = calltypes
        for inst in block.find_insts(ir.SetItem):
            index_ty = typemap[inst.index.name]
            if isinstance(index_ty, types.StringLiteral):
                self.setitems[inst] = (inst.index, index_ty.literal_value)

        return bool(self.setitems)

    def apply(self):
        """
        Rewrite all matching setitems as static_setitems where the index
        is the literal value of the string.
        """
        out_block = ir.Block(self.block.scope, self.block.loc)
        for inst in self.block.body:
            if isinstance(inst, ir.SetItem) and inst in self.setitems:
                _, lit_val = self.setitems[inst]
                new_inst = ir.StaticSetItem(target=inst.target,
                                            index=lit_val,
                                            index_var=inst.index,
                                            value=inst.value,
                                            loc=inst.loc)
                # Carry the recorded call type over to the new statement.
                self.calltypes[new_inst] = self.calltypes[inst]
                inst = new_inst
            out_block.append(inst)
        return out_block
137
+
138
+
139
@register_rewrite('before-inference')
class RewriteConstSetitems(Rewrite):
    """
    Rewrite IR statements of the kind `setitem(target=arr, index=$constXX, ...)`
    where `$constXX` is a known constant as
    `static_setitem(target=arr, index=<constant value>, ...)`.
    """

    def match(self, func_ir, block, typemap, calltypes):
        self.setitems = {}
        self.block = block
        # Detect all setitem statements and find which ones can be
        # rewritten
        for inst in block.find_insts(ir.SetItem):
            try:
                self.setitems[inst] = func_ir.infer_constant(inst.index)
            except errors.ConstantInferenceError:
                pass

        return bool(self.setitems)

    def apply(self):
        """
        Rewrite all matching setitems as static_setitems.
        """
        rewritten = self.block.copy()
        rewritten.clear()
        for inst in self.block.body:
            if inst in self.setitems:
                const = self.setitems[inst]
                inst = ir.StaticSetItem(inst.target, const,
                                        inst.index, inst.value, inst.loc)
            rewritten.append(inst)
        return rewritten
lib/python3.10/site-packages/numba/core/rewrites/static_raise.py ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from numba.core import errors, ir, consts
2
+ from numba.core.rewrites import register_rewrite, Rewrite
3
+
4
+
5
@register_rewrite('before-inference')
class RewriteConstRaises(Rewrite):
    """
    Rewrite IR statements of the kind `raise(value)`
    where `value` is the result of instantiating an exception with
    constant arguments
    into `static_raise(exception_type, constant args)`.

    This allows lowering in nopython mode, where one can't instantiate
    exception instances from runtime data.
    """

    def _is_exception_type(self, const):
        # True only for exception *classes*, not instances.
        return isinstance(const, type) and issubclass(const, Exception)

    def _break_constant(self, const, loc):
        """
        Break down constant exception.

        Returns a (exception class, args tuple or None) pair, or raises
        UnsupportedError for constants that cannot be raised statically.
        """
        if isinstance(const, tuple): # it's a tuple(exception class, args)
            if not self._is_exception_type(const[0]):
                msg = "Encountered unsupported exception constant %r"
                raise errors.UnsupportedError(msg % (const[0],), loc)
            return const[0], tuple(const[1])
        elif self._is_exception_type(const):
            # Exception class raised without arguments.
            return const, None
        else:
            if isinstance(const, str):
                msg = ("Directly raising a string constant as an exception is "
                       "not supported.")
            else:
                msg = "Encountered unsupported constant type used for exception"
            raise errors.UnsupportedError(msg, loc)

    def _try_infer_constant(self, func_ir, inst):
        # Returns the inferred constant exception value, or None when it
        # cannot be inferred (i.e. not a static raise).
        try:
            return func_ir.infer_constant(inst.exception)
        except consts.ConstantInferenceError:
            # not a static exception
            return None

    def match(self, func_ir, block, typemap, calltypes):
        self.raises = raises = {}
        self.tryraises = tryraises = {}
        self.block = block
        # Detect all raise statements and find which ones can be
        # rewritten
        for inst in block.find_insts((ir.Raise, ir.TryRaise)):
            if inst.exception is None:
                # re-reraise
                exc_type, exc_args = None, None
            else:
                # raise <something> => find the definition site for <something>
                const = self._try_infer_constant(func_ir, inst)

                # failure to infer constant indicates this isn't a static
                # exception
                if const is None:
                    continue

                loc = inst.exception.loc
                exc_type, exc_args = self._break_constant(const, loc)

            if isinstance(inst, ir.Raise):
                raises[inst] = exc_type, exc_args
            elif isinstance(inst, ir.TryRaise):
                tryraises[inst] = exc_type, exc_args
            else:
                raise ValueError('unexpected: {}'.format(type(inst)))
        return (len(raises) + len(tryraises)) > 0

    def apply(self):
        """
        Rewrite all matching raises as static_raises (and try-raises as
        static try-raises).
        """
        new_block = self.block.copy()
        new_block.clear()
        for inst in self.block.body:
            if inst in self.raises:
                exc_type, exc_args = self.raises[inst]
                new_inst = ir.StaticRaise(exc_type, exc_args, inst.loc)
                new_block.append(new_inst)
            elif inst in self.tryraises:
                exc_type, exc_args = self.tryraises[inst]
                new_inst = ir.StaticTryRaise(exc_type, exc_args, inst.loc)
                new_block.append(new_inst)
            else:
                new_block.append(inst)
        return new_block
lib/python3.10/site-packages/numba/core/types/__init__.py ADDED
@@ -0,0 +1,386 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import struct
2
+
3
+ import numpy as np
4
+ from numba.core import utils
5
+
6
+ from .abstract import *
7
+ from .containers import *
8
+ from .functions import *
9
+ from .iterators import *
10
+ from .misc import *
11
+ from .npytypes import *
12
+ from .scalars import *
13
+ from .function_type import *
14
+
15
+ numpy_version = tuple(map(int, np.__version__.split('.')[:2]))
16
+
17
+ # Short names
18
+
19
+ pyobject = PyObject('pyobject')
20
+ ffi_forced_object = Opaque('ffi_forced_object')
21
+ ffi = Opaque('ffi')
22
+ none = NoneType('none')
23
+ ellipsis = EllipsisType('...')
24
+ Any = Phantom('any')
25
+ undefined = Undefined('undefined')
26
+ py2_string_type = Opaque('str')
27
+ unicode_type = UnicodeType('unicode_type')
28
+ string = unicode_type
29
+ unknown = Dummy('unknown')
30
+ npy_rng = NumPyRandomGeneratorType('rng')
31
+ npy_bitgen = NumPyRandomBitGeneratorType('bitgen')
32
+
33
+ # _undef_var is used to represent undefined variables in the type system.
34
+ _undef_var = UndefVar('_undef_var')
35
+
36
+ code_type = Opaque('code')
37
+ pyfunc_type = Opaque('pyfunc')
38
+
39
+ # No operation is defined on voidptr
40
+ # Can only pass it around
41
+ voidptr = RawPointer('void*')
42
+
43
+ # optional types
44
+ optional = Optional
45
+ deferred_type = DeferredType
46
+ slice2_type = SliceType('slice<a:b>', 2)
47
+ slice3_type = SliceType('slice<a:b:c>', 3)
48
+ void = none
49
+
50
+ # Need to ignore mypy errors because mypy cannot unify types for both
51
+ # the type systems even if they're logically mutually exclusive.
52
+ # mypy: ignore-errors
53
+
54
+ if config.USE_LEGACY_TYPE_SYSTEM: # type: ignore
55
+ boolean = bool_ = Boolean('bool')
56
+ if numpy_version >= (2, 0):
57
+ bool = bool_
58
+
59
+ byte = uint8 = Integer('uint8')
60
+ uint16 = Integer('uint16')
61
+ uint32 = Integer('uint32')
62
+ uint64 = Integer('uint64')
63
+
64
+ int8 = Integer('int8')
65
+ int16 = Integer('int16')
66
+ int32 = Integer('int32')
67
+ int64 = Integer('int64')
68
+ intp = int32 if utils.MACHINE_BITS == 32 else int64
69
+ uintp = uint32 if utils.MACHINE_BITS == 32 else uint64
70
+ intc = int32 if struct.calcsize('i') == 4 else int64
71
+ uintc = uint32 if struct.calcsize('I') == 4 else uint64
72
+ ssize_t = int32 if struct.calcsize('n') == 4 else int64
73
+ size_t = uint32 if struct.calcsize('N') == 4 else uint64
74
+
75
+ float32 = Float('float32')
76
+ float64 = Float('float64')
77
+ float16 = Float('float16')
78
+
79
+ complex64 = Complex('complex64', float32)
80
+ complex128 = Complex('complex128', float64)
81
+
82
+ range_iter32_type = RangeIteratorType(int32)
83
+ range_iter64_type = RangeIteratorType(int64)
84
+ unsigned_range_iter64_type = RangeIteratorType(uint64)
85
+ range_state32_type = RangeType(int32)
86
+ range_state64_type = RangeType(int64)
87
+ unsigned_range_state64_type = RangeType(uint64)
88
+
89
+ signed_domain = frozenset([int8, int16, int32, int64])
90
+ unsigned_domain = frozenset([uint8, uint16, uint32, uint64])
91
+ integer_domain = signed_domain | unsigned_domain
92
+ real_domain = frozenset([float32, float64])
93
+ complex_domain = frozenset([complex64, complex128])
94
+ number_domain = real_domain | integer_domain | complex_domain
95
+
96
+ # Integer Aliases
97
+ c_bool = py_bool = np_bool_ = boolean
98
+
99
+ c_uint8 = np_uint8 = uint8
100
+ c_uint16 = np_uint16 = uint16
101
+ c_uint32 = np_uint32 = uint32
102
+ c_uint64 = np_uint64 = uint64
103
+ c_uintp = np_uintp = uintp
104
+
105
+ c_int8 = np_int8 = int8
106
+ c_int16 = np_int16 = int16
107
+ c_int32 = np_int32 = int32
108
+ c_int64 = np_int64 = int64
109
+ c_intp = py_int = np_intp = intp
110
+
111
+ c_float16 = np_float16 = float16
112
+ c_float32 = np_float32 = float32
113
+ c_float64 = py_float = np_float64 = float64
114
+
115
+ np_complex64 = complex64
116
+ py_complex = np_complex128 = complex128
117
+
118
+ # Domain Aliases
119
+ py_signed_domain = np_signed_domain = signed_domain
120
+ np_unsigned_domain = unsigned_domain
121
+ py_integer_domain = np_integer_domain = integer_domain
122
+ py_real_domain = np_real_domain = real_domain
123
+ py_complex_domain = np_complex_domain = complex_domain
124
+ py_number_domain = np_number_domain = number_domain
125
+
126
+ # Aliases to NumPy type names
127
+
128
+ b1 = bool_
129
+ i1 = int8
130
+ i2 = int16
131
+ i4 = int32
132
+ i8 = int64
133
+ u1 = uint8
134
+ u2 = uint16
135
+ u4 = uint32
136
+ u8 = uint64
137
+
138
+ f2 = float16
139
+ f4 = float32
140
+ f8 = float64
141
+
142
+ c8 = complex64
143
+ c16 = complex128
144
+
145
+ np_float_ = float32
146
+ np_double = double = float64
147
+ if numpy_version < (2, 0):
148
+ float_ = float32
149
+
150
+ _make_signed = lambda x: globals()["int%d" % (np.dtype(x).itemsize * 8)]
151
+ _make_unsigned = lambda x: globals()["uint%d" % (np.dtype(x).itemsize * 8)]
152
+
153
+ char = np_char = _make_signed(np.byte)
154
+ uchar = np_uchar = byte = _make_unsigned(np.byte)
155
+ short = np_short = _make_signed(np.short)
156
+ ushort = np_ushort = _make_unsigned(np.short)
157
+ int_ = np_int_ = _make_signed(np.int_)
158
+ uint = np_uint = _make_unsigned(np.int_)
159
+ intc = np_intc = _make_signed(np.intc) # C-compat int
160
+ uintc = np_uintc = _make_unsigned(np.uintc) # C-compat uint
161
+ long_ = np_long = _make_signed(np.int_) # C-compat long
162
+ ulong = np_ulong = _make_unsigned(np.int_) # C-compat ulong
163
+ longlong = np_longlong = _make_signed(np.longlong)
164
+ ulonglong = np_ulonglong = _make_unsigned(np.longlong)
165
+
166
+ all_str = '''
167
+ int8
168
+ int16
169
+ int32
170
+ int64
171
+ uint8
172
+ uint16
173
+ uint32
174
+ uint64
175
+ intp
176
+ uintp
177
+ intc
178
+ uintc
179
+ ssize_t
180
+ size_t
181
+ boolean
182
+ float32
183
+ float64
184
+ complex64
185
+ complex128
186
+ bool_
187
+ byte
188
+ char
189
+ uchar
190
+ short
191
+ ushort
192
+ int_
193
+ uint
194
+ long_
195
+ ulong
196
+ longlong
197
+ ulonglong
198
+ float_
199
+ double
200
+ void
201
+ none
202
+ b1
203
+ i1
204
+ i2
205
+ i4
206
+ i8
207
+ u1
208
+ u2
209
+ u4
210
+ u8
211
+ f4
212
+ f8
213
+ c8
214
+ c16
215
+ optional
216
+ ffi_forced_object
217
+ ffi
218
+ deferred_type
219
+ '''
220
+ else:
221
+ from .new_scalars import *
222
+ ### Machine Datatypes ###
223
+ c_bool = MachineBoolean('c_bool')
224
+ c_byte = c_int8 = MachineInteger('c_int8')
225
+ c_int16 = MachineInteger('c_int16')
226
+ c_int32 = MachineInteger('c_int32')
227
+ c_int64 = MachineInteger('c_int64')
228
+ c_uint8 = MachineInteger('c_uint8')
229
+ c_uint16 = MachineInteger('c_uint16')
230
+ c_uint32 = MachineInteger('c_uint32')
231
+ c_uint64 = MachineInteger('c_uint64')
232
+
233
+ c_intp = c_int32 if utils.MACHINE_BITS == 32 else c_int64
234
+ c_uintp = c_uint32 if utils.MACHINE_BITS == 32 else c_uint64
235
+
236
+ # Machine Floats
237
+ c_float16 = MachineFloat('c_float16')
238
+ c_float32 = MachineFloat('c_float32')
239
+ c_float64 = MachineFloat('c_float64')
240
+
241
+ # Machine Complex
242
+ c_complex64 = MachineComplex('c_complex64', c_float32)
243
+ c_complex128 = MachineComplex('c_complex128', c_float64)
244
+
245
+ c_signed_domain = frozenset([c_int8, c_int16, c_int32, c_int64])
246
+ c_unsigned_domain = frozenset([c_uint8, c_uint16, c_uint32, c_uint64])
247
+ c_integer_domain = c_signed_domain | c_unsigned_domain
248
+ c_real_domain = frozenset([c_float32, c_float64])
249
+ c_complex_domain = frozenset([c_complex64, c_complex128])
250
+ c_number_domain = c_real_domain | c_integer_domain | c_complex_domain
251
+
252
+ ### Python Datatypes ###
253
+ # Python Integers
254
+ py_bool = PythonBoolean('py_bool')
255
+ py_int = PythonInteger('py_int')
256
+
257
+ # Python Float
258
+ py_float = PythonFloat('py_float')
259
+
260
+ # Python Complex
261
+ py_complex = PythonComplex('py_complex', py_float)
262
+
263
+ py_signed_domain = frozenset([py_int])
264
+ py_integer_domain = py_signed_domain
265
+ py_real_domain = frozenset([py_float])
266
+ py_complex_domain = frozenset([py_complex])
267
+ py_number_domain = py_real_domain | py_integer_domain | py_complex_domain
268
+
269
+ range_iter_type = RangeIteratorType(py_int)
270
+ range_state_type = RangeType(py_int)
271
+
272
+ ### NumPy Datatypes ###
273
+ # Numpy Integers
274
+ np_bool_ = np_bool = NumPyBoolean('np_bool_')
275
+ np_byte = np_int8 = NumPyInteger('np_int8')
276
+ np_int16 = NumPyInteger('np_int16')
277
+ np_int32 = NumPyInteger('np_int32')
278
+ np_int64 = NumPyInteger('np_int64')
279
+ np_uint8 = NumPyInteger('np_uint8')
280
+ np_uint16 = NumPyInteger('np_uint16')
281
+ np_uint32 = NumPyInteger('np_uint32')
282
+ np_uint64 = NumPyInteger('np_uint64')
283
+
284
+ np_intp = np_int32 if utils.MACHINE_BITS == 32 else np_int64
285
+ np_uintp = np_uint32 if utils.MACHINE_BITS == 32 else np_uint64
286
+
287
+ # NumPy Floats
288
+ np_float16 = NumPyFloat('np_float16')
289
+ np_float32 = NumPyFloat('np_float32')
290
+ np_float64 = NumPyFloat('np_float64')
291
+
292
+ # NumPy Complex
293
+ np_complex64 = NumPyComplex('np_complex64', np_float32)
294
+ np_complex128 = NumPyComplex('np_complex128', np_float64)
295
+
296
+ np_signed_domain = frozenset([np_int8, np_int16, np_int32, np_int64])
297
+ np_unsigned_domain = frozenset([np_uint8, np_uint16, np_uint32, np_uint64])
298
+ np_integer_domain = np_signed_domain | np_unsigned_domain
299
+ np_real_domain = frozenset([np_float32, np_float64])
300
+ np_complex_domain = frozenset([np_complex64, np_complex128])
301
+ np_number_domain = np_real_domain | np_integer_domain | np_complex_domain
302
+
303
+ # NumPy globals
304
+ np_double = np_float64
305
+ _make_signed = lambda x: globals()["np_int%d" % (np.dtype(x).itemsize * 8)]
306
+ _make_unsigned = lambda x: globals()["np_uint%d" % (np.dtype(x).itemsize * 8)]
307
+
308
+ np_char = _make_signed(np.byte)
309
+ np_uchar = byte = _make_unsigned(np.byte)
310
+ np_short = _make_signed(np.short)
311
+ np_ushort = _make_unsigned(np.short)
312
+ np_int_ = _make_signed(np.int_)
313
+ np_uint = _make_unsigned(np.int_)
314
+ np_intc = _make_signed(np.intc) # C-compat int
315
+ np_uintc = _make_unsigned(np.uintc) # C-compat uint
316
+ np_long_ = _make_signed(np.int_) # C-compat long
317
+ np_ulong = _make_unsigned(np.int_) # C-compat ulong
318
+ np_longlong = _make_signed(np.longlong)
319
+ np_ulonglong = _make_unsigned(np.longlong)
320
+
321
+ all_str = '''
322
+ c_bool
323
+ c_byte
324
+ c_int8
325
+ c_int16
326
+ c_int32
327
+ c_int64
328
+ c_uint8
329
+ c_uint16
330
+ c_uint32
331
+ c_uint64
332
+ c_intp
333
+ c_uintp
334
+ c_float16
335
+ c_float32
336
+ c_float64
337
+ c_complex64
338
+ c_complex128
339
+ py_bool
340
+ py_int
341
+ py_float
342
+ py_complex
343
+ np_bool_
344
+ np_bool
345
+ np_byte
346
+ np_int8
347
+ np_int16
348
+ np_int32
349
+ np_int64
350
+ np_uint8
351
+ np_uint16
352
+ np_uint32
353
+ np_uint64
354
+ np_intp
355
+ np_uintp
356
+ np_float16
357
+ np_float32
358
+ np_float64
359
+ np_complex64
360
+ np_complex128
361
+ np_double
362
+ np_char
363
+ np_uchar
364
+ np_short
365
+ np_ushort
366
+ np_int_
367
+ np_uint
368
+ np_intc
369
+ np_uintc
370
+ np_long_
371
+ np_ulong
372
+ np_longlong
373
+ np_ulonglong
374
+ ffi_forced_object
375
+ ffi
376
+ none
377
+ optional
378
+ deferred_type
379
+ void
380
+ '''
381
+
382
+
383
+ __all__ = all_str.split()
384
+ if numpy_version >= (2, 0) and config.USE_LEGACY_TYPE_SYSTEM:
385
+ __all__.remove('float_')
386
+ __all__.append('bool')
lib/python3.10/site-packages/numba/core/types/abstract.py ADDED
@@ -0,0 +1,512 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from abc import ABCMeta, abstractmethod, abstractproperty
from typing import Dict as ptDict, Type as ptType
import itertools
import weakref
from functools import cached_property

import numpy as np

from numba.core.utils import get_hashable_key

# Types are added to a global registry (_typecache) in order to assign
# them unique integer codes for fast matching in _dispatcher.c.
# However, we also want types to be disposable, therefore we ensure
# each type is interned as a weak reference, so that it lives only as
# long as necessary to keep a stable type code.
# NOTE: some types can still be made immortal elsewhere (for example
# in _dispatcher.c's internal caches).

# Monotonically increasing source of fresh type codes.
_typecodes = itertools.count()

def _autoincr():
    """Return the next unused integer type code.

    Asserts once the 32-bit code space is exhausted.
    """
    n = next(_typecodes)
    # 4 billion types should be enough, right?
    assert n < 2 ** 32, "Limited to 4 billion types"
    return n

# Weak interning table: each key and value is the same weakref to an
# interned Type instance, so structurally equal types share one object.
_typecache: ptDict[weakref.ref, weakref.ref] = {}

def _on_type_disposal(wr, _pop=_typecache.pop):
    # Weakref callback: drop the cache entry when the interned type dies.
    # *_pop* is bound at definition time so it remains callable even
    # during interpreter shutdown.
    _pop(wr, None)
+
32
class _TypeMetaclass(ABCMeta):
    """
    A metaclass that will intern instances after they are created.
    This is done by first creating a new instance (including calling
    __init__, which sets up the required attributes for equality
    and hashing), then looking it up in the _typecache registry.
    """

    def __init__(cls, name, bases, orig_vars):
        # __init__ is hooked to mark whether a Type class being defined is a
        # Numba internal type (one which is defined somewhere under the `numba`
        # module) or an external type (one which is defined elsewhere, for
        # example a user defined type).
        super(_TypeMetaclass, cls).__init__(name, bases, orig_vars)
        root = (cls.__module__.split('.'))[0]
        cls._is_internal = root == "numba"

    def _intern(cls, inst):
        # Try to intern the created instance.  Equality/hash come from the
        # instance's `key`, so structurally equal instances collapse onto
        # a single canonical object carrying a single `_code`.
        wr = weakref.ref(inst, _on_type_disposal)
        orig = _typecache.get(wr)
        orig = orig and orig()
        if orig is not None:
            return orig
        else:
            inst._code = _autoincr()
            _typecache[wr] = wr
            return inst

    def __call__(cls, *args, **kwargs):
        """
        Instantiate *cls* (a Type subclass, presumably) and intern it.
        If an interned instance already exists, it is returned, otherwise
        the new instance is returned.
        """
        inst = type.__call__(cls, *args, **kwargs)
        return cls._intern(inst)
69
+
70
+
71
def _type_reconstructor(reconstructor, reconstructor_args, state):
    """
    Rebuild function for unpickling types.

    Rebuilds the instance via the standard pickle reconstructor, then
    re-interns it so unpickling yields the canonical shared instance.
    """
    obj = reconstructor(*reconstructor_args)
    if state:
        obj.__dict__.update(state)
    return type(obj)._intern(obj)
+
80
+
81
class Type(metaclass=_TypeMetaclass):
    """
    The base class for all Numba types.
    It is essential that proper equality comparison is implemented. The
    default implementation uses the "key" property (overridable in subclasses)
    for both comparison and hashing, to ensure sane behaviour.
    """

    # Whether values of this type can be mutated.
    mutable = False
    # Whether the type is reflected at the python<->nopython boundary
    reflected = False

    def __init__(self, name):
        self.name = name

    @property
    def key(self):
        """
        A property used for __eq__, __ne__ and __hash__. Can be overridden
        in subclasses.
        """
        return self.name

    @property
    def mangling_args(self):
        """
        Returns `(basename, args)` where `basename` is the name of the type
        and `args` is a sequence of parameters of the type.

        Subclass should override to specialize the behavior.
        By default, this returns `(self.name, ())`.
        """
        return self.name, ()

    def __repr__(self):
        return self.name

    def __str__(self):
        return self.name

    def __hash__(self):
        return hash(self.key)

    def __eq__(self, other):
        # Same concrete class AND same key; subclasses customize `key`.
        return self.__class__ is other.__class__ and self.key == other.key

    def __ne__(self, other):
        return not (self == other)

    def __reduce__(self):
        # Route unpickling through _type_reconstructor so the rebuilt
        # object is re-interned into the global type cache.
        reconstructor, args, state = super(Type, self).__reduce__()
        return (_type_reconstructor, (reconstructor, args, state))

    def unify(self, typingctx, other):
        """
        Try to unify this type with the *other*. A third type must
        be returned, or None if unification is not possible.
        Only override this if the coercion logic cannot be expressed
        as simple casting rules.
        """
        return None

    def can_convert_to(self, typingctx, other):
        """
        Check whether this type can be converted to the *other*.
        If successful, must return a string describing the conversion, e.g.
        "exact", "promote", "unsafe", "safe"; otherwise None is returned.
        """
        return None

    def can_convert_from(self, typingctx, other):
        """
        Similar to *can_convert_to*, but in reverse. Only needed if
        the type provides conversion from other types.
        """
        return None

    def is_precise(self):
        """
        Whether this type is precise, i.e. can be part of a successful
        type inference. Default implementation returns True.
        """
        return True

    def augment(self, other):
        """
        Augment this type with the *other*. Return the augmented type,
        or None if not supported.
        """
        return None

    # User-facing helpers. These are not part of the core Type API but
    # are provided so that users can write e.g. `numba.boolean(1.5)`
    # (returns True) or `types.int32(types.int32[:])` (returns something
    # usable as a function signature).

    def __call__(self, *args):
        # Called with a single non-Type value: cast it to this type.
        # Otherwise: build a function signature with this type as the
        # return type and *args* as the argument types.
        from numba.core.typing import signature
        if len(args) == 1 and not isinstance(args[0], Type):
            return self.cast_python_value(args[0])
        return signature(self, # return_type
                         *args)

    def __getitem__(self, args):
        """
        Return an array of this type.
        """
        from numba.core.types import Array
        ndim, layout = self._determine_array_spec(args)
        return Array(dtype=self, ndim=ndim, layout=layout)

    def _determine_array_spec(self, args):
        # XXX non-contiguous by default, even for 1d arrays,
        # doesn't sound very intuitive

        # A valid dimension spec is a bare `:` slice (no start/stop);
        # `::1` (step == 1) marks the contiguous dimension.
        def validate_slice(s):
            return isinstance(s, slice) and s.start is None and s.stop is None

        if isinstance(args, (tuple, list)) and all(map(validate_slice, args)):
            ndim = len(args)
            if args[0].step == 1:
                layout = 'F'
            elif args[-1].step == 1:
                layout = 'C'
            else:
                layout = 'A'
        elif validate_slice(args):
            ndim = 1
            if args.step == 1:
                layout = 'C'
            else:
                layout = 'A'
        else:
            # Raise a KeyError to not be handled by collection constructors (e.g. list).
            raise KeyError(f"Can only index numba types with slices with no start or stop, got {args}.")

        return ndim, layout

    def cast_python_value(self, args):
        # Convert a raw Python value to this type; concrete subclasses
        # override this.  The base implementation is abstract.
        raise NotImplementedError


    @property
    def is_internal(self):
        """ Returns True if this class is an internally defined Numba type by
        virtue of the module in which it is instantiated, False else."""
        return self._is_internal

    def dump(self, tab=''):
        # Debug helper: print this type's code and name.
        print(f'{tab}DUMP {type(self).__name__}[code={self._code}, name={self.name}]')
230
+
231
+ # XXX we should distinguish between Dummy (no meaningful
232
+ # representation, e.g. None or a builtin function) and Opaque (has a
233
+ # meaningful representation, e.g. ExternalFunctionPointer)
234
+
235
class Dummy(Type):
    """
    Base class for types that do not really have a representation and are
    compatible with a void*.
    """


class Hashable(Type):
    """
    Base class for hashable types.
    """
246
+
247
+
248
class Number(Hashable):
    """
    Base class for number types.
    """

    def unify(self, typingctx, other):
        """
        Unify the two number types using Numpy's rules.
        """
        from numba.np import numpy_support
        if isinstance(other, Number):
            # XXX: this can produce unsafe conversions,
            # e.g. would unify {int64, uint64} to float64
            a = numpy_support.as_dtype(self)
            b = numpy_support.as_dtype(other)
            sel = np.promote_types(a, b)
            return numpy_support.from_dtype(sel)
        # Implicitly returns None for non-Number operands (no unification).
265
+
266
+
267
class Callable(Type):
    """
    Base class for callables.

    Concrete subclasses must implement the three abstract methods below.
    """

    @abstractmethod
    def get_call_type(self, context, args, kws):
        """
        Using the typing *context*, resolve the callable's signature for
        the given arguments. A signature object is returned, or None.
        """

    @abstractmethod
    def get_call_signatures(self):
        """
        Returns a tuple of (list of signatures, parameterized)
        """

    @abstractmethod
    def get_impl_key(self, sig):
        """
        Returns the impl key for the given signature
        """
290
+
291
+
292
class DTypeSpec(Type):
    """
    Base class for types usable as "dtype" arguments to various Numpy APIs
    (e.g. np.empty()).
    """

    @abstractproperty
    def dtype(self):
        """
        The actual dtype denoted by this dtype spec (a Type instance).
        """


class IterableType(Type):
    """
    Base class for iterable types.
    """

    @abstractproperty
    def iterator_type(self):
        """
        The iterator type obtained when calling iter() (explicitly or implicitly).
        """
315
+
316
+
317
class Sized(Type):
    """
    Base class for objects that support len()
    """


class ConstSized(Sized):
    """
    For types that have a constant size
    """
    @abstractmethod
    def __len__(self):
        pass
330
+
331
+
332
class IteratorType(IterableType):
    """
    Base class for all iterator types.
    Derived classes should implement the *yield_type* attribute.
    """

    def __init__(self, name, **kwargs):
        super(IteratorType, self).__init__(name, **kwargs)

    @abstractproperty
    def yield_type(self):
        """
        The type of values yielded by the iterator.
        """

    # This is a property to avoid recursivity (for pickling)

    @property
    def iterator_type(self):
        # An iterator is its own iterator type (iter(it) is it).
        return self
352
+
353
+
354
class Container(Sized, IterableType):
    """
    Base class for container types.
    """


class Sequence(Container):
    """
    Base class for 1d sequence types. Instances should have the *dtype*
    attribute.
    """


class MutableSequence(Sequence):
    """
    Base class for 1d mutable sequence types. Instances should have the
    *dtype* attribute.
    """

    mutable = True
374
+
375
class ArrayCompatible(Type):
    """
    Type class for Numpy array-compatible objects (typically, objects
    exposing an __array__ method).
    Derived classes should implement the *as_array* attribute.
    """
    # If overridden by a subclass, it should also implement typing
    # for '__array_wrap__' with arguments (input, formal result).
    array_priority = 0.0

    @abstractproperty
    def as_array(self):
        """
        The equivalent array type, for operations supporting array-compatible
        objects (such as ufuncs).
        """

    # For compatibility with types.Array, delegate the usual array
    # attributes to the equivalent array type (computed once each).

    @cached_property
    def ndim(self):
        return self.as_array.ndim

    @cached_property
    def layout(self):
        return self.as_array.layout

    @cached_property
    def dtype(self):
        return self.as_array.dtype
405
+
406
+
407
class Literal(Type):
    """Base class for Literal types.
    Literal types contain the original Python value in the type.

    A literal type should always be constructed from the `literal(val)`
    function.
    """

    # *ctor_map* is a dictionary mapping Python types to Literal subclasses
    # for constructing a numba type for a given Python type.
    # It is used in `literal(val)` function.
    # To add new Literal subclass, register a new mapping to this dict.
    ctor_map: ptDict[type, ptType['Literal']] = {}

    # *_literal_type_cache* is used to cache the numba type of the given value.
    _literal_type_cache = None

    def __init__(self, value):
        # Guard against direct instantiation of the abstract base.
        if type(self) is Literal:
            raise TypeError(
                "Cannot be constructed directly. "
                "Use `numba.types.literal(value)` instead",
            )
        self._literal_init(value)
        fmt = "Literal[{}]({})"
        super(Literal, self).__init__(fmt.format(type(value).__name__, value))

    def _literal_init(self, value):
        self._literal_value = value
        # We want to support constants of non-hashable values, therefore
        # fall back on the value's id() if necessary.
        self._key = get_hashable_key(value)

    @property
    def literal_value(self):
        return self._literal_value

    @property
    def literal_type(self):
        # Lazily resolve (and cache) the non-literal Numba type of the
        # wrapped Python value.
        if self._literal_type_cache is None:
            from numba.core import typing
            ctx = typing.Context()
            try:
                res = ctx.resolve_value_type(self.literal_value)
            except ValueError as e:

                if "Int value is too large" in str(e):
                    # If a string literal cannot create an IntegerLiteral
                    # because of overflow we generate this message.
                    msg = f"Cannot create literal type. {str(e)}"
                    raise TypeError(msg)
                # Not all literal types have a literal_value that can be
                # resolved to a type, for example, LiteralStrKeyDict has a
                # literal_value that is a python dict for which there's no
                # `typeof` support.
                msg = "{} has no attribute 'literal_type'".format(self)
                raise AttributeError(msg)
            self._literal_type_cache = res

        return self._literal_type_cache
467
+
468
+
469
+
470
class TypeRef(Dummy):
    """Reference to a type.

    Used when a type is passed as a value.
    """
    def __init__(self, instance_type):
        self.instance_type = instance_type
        super(TypeRef, self).__init__('typeref[{}]'.format(self.instance_type))

    @property
    def key(self):
        # Identity of a TypeRef is the referenced type itself.
        return self.instance_type
482
+
483
+
484
class InitialValue(object):
    """
    Used as a mixin for a type that will potentially have an initial value
    that will be carried in the .initial_value attribute.
    """
    def __init__(self, initial_value):
        self._initial_value = initial_value

    @property
    def initial_value(self):
        return self._initial_value
495
+
496
+
497
class Poison(Type):
    """
    This is the "bottom" type in the type system. It won't unify and its
    unliteral version is Poison of itself. It's advisable for debugging purposes
    to call the constructor with the type that's being poisoned (for whatever
    reason) but this isn't strictly required.
    """
    def __init__(self, ty):
        self.ty = ty
        super(Poison, self).__init__(name="Poison<%s>" % ty)

    def __unliteral__(self):
        return Poison(self)

    def unify(self, typingctx, other):
        # Poison never unifies with anything.
        return None
lib/python3.10/site-packages/numba/core/types/common.py ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Helper classes / mixins for defining types.
3
+ """
4
+
5
+ from .abstract import ArrayCompatible, Dummy, IterableType, IteratorType
6
+ from numba.core.errors import NumbaTypeError, NumbaValueError
7
+
8
+
9
+ class Opaque(Dummy):
10
+ """
11
+ A type that is a opaque pointer.
12
+ """
13
+
14
+
15
class SimpleIterableType(IterableType):
    # Iterable whose iterator type is fixed at construction time.

    def __init__(self, name, iterator_type):
        self._iterator_type = iterator_type
        super(SimpleIterableType, self).__init__(name)

    @property
    def iterator_type(self):
        return self._iterator_type


class SimpleIteratorType(IteratorType):
    # Iterator whose yield type is fixed at construction time.

    def __init__(self, name, yield_type):
        self._yield_type = yield_type
        super(SimpleIteratorType, self).__init__(name)

    @property
    def yield_type(self):
        return self._yield_type
35
+
36
+
37
class Buffer(IterableType, ArrayCompatible):
    """
    Type class for objects providing the buffer protocol.
    Derived classes exist for more specific cases.
    """
    mutable = True
    # Whether slicing produces a copy (True) or a view (False).
    slice_is_copy = False
    aligned = True

    # CS and FS are not reserved for inner contig but strided
    LAYOUTS = frozenset(['C', 'F', 'CS', 'FS', 'A'])

    def __init__(self, dtype, ndim, layout, readonly=False, name=None):
        from .misc import unliteral

        if isinstance(dtype, Buffer):
            # Nested buffer dtypes are explicitly rejected.
            msg = ("The dtype of a Buffer type cannot itself be a Buffer type, "
                   "this is unsupported behaviour."
                   "\nThe dtype requested for the unsupported Buffer was: {}.")
            raise NumbaTypeError(msg.format(dtype))
        if layout not in self.LAYOUTS:
            raise NumbaValueError("Invalid layout '%s'" % layout)
        self.dtype = unliteral(dtype)
        self.ndim = ndim
        self.layout = layout
        if readonly:
            self.mutable = False
        if name is None:
            # Derive a default display name, e.g. "array(int64, 1d, C)".
            type_name = self.__class__.__name__.lower()
            if readonly:
                type_name = "readonly %s" % type_name
            name = "%s(%s, %sd, %s)" % (type_name, dtype, ndim, layout)
        super(Buffer, self).__init__(name)

    @property
    def iterator_type(self):
        from .iterators import ArrayIterator
        return ArrayIterator(self)

    @property
    def as_array(self):
        return self

    def copy(self, dtype=None, ndim=None, layout=None):
        # Return a variant of this type with the given fields overridden;
        # read-only-ness is carried over from this instance.
        if dtype is None:
            dtype = self.dtype
        if ndim is None:
            ndim = self.ndim
        if layout is None:
            layout = self.layout
        return self.__class__(dtype=dtype, ndim=ndim, layout=layout,
                              readonly=not self.mutable)

    @property
    def key(self):
        return self.dtype, self.ndim, self.layout, self.mutable

    @property
    def is_c_contig(self):
        # 0d/1d buffers are both C- and F-contiguous when contiguous at all.
        return self.layout == 'C' or (self.ndim <= 1 and self.layout in 'CF')

    @property
    def is_f_contig(self):
        return self.layout == 'F' or (self.ndim <= 1 and self.layout in 'CF')

    @property
    def is_contig(self):
        return self.layout in 'CF'
lib/python3.10/site-packages/numba/core/types/containers.py ADDED
@@ -0,0 +1,974 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from collections.abc import Iterable
2
+ from collections.abc import Sequence as pySequence
3
+ from types import MappingProxyType
4
+
5
+ from .abstract import (
6
+ ConstSized,
7
+ Container,
8
+ Hashable,
9
+ MutableSequence,
10
+ Sequence,
11
+ Type,
12
+ TypeRef,
13
+ Literal,
14
+ InitialValue,
15
+ Poison,
16
+ )
17
+ from .common import (
18
+ Buffer,
19
+ IterableType,
20
+ SimpleIterableType,
21
+ SimpleIteratorType,
22
+ )
23
+ from .misc import Undefined, unliteral, Optional, NoneType
24
+ from ..typeconv import Conversion
25
+ from ..errors import TypingError
26
+ from .. import utils
27
+
28
+
29
class Pair(Type):
    """
    A heterogeneous pair.
    """

    def __init__(self, first_type, second_type):
        self.first_type = first_type
        self.second_type = second_type
        name = "pair<%s, %s>" % (first_type, second_type)
        super(Pair, self).__init__(name=name)

    @property
    def key(self):
        return self.first_type, self.second_type

    def unify(self, typingctx, other):
        # Pairs unify element-wise; implicitly None if either side fails.
        if isinstance(other, Pair):
            first = typingctx.unify_pairs(self.first_type, other.first_type)
            second = typingctx.unify_pairs(self.second_type, other.second_type)
            if first is not None and second is not None:
                return Pair(first, second)
50
+
51
+
52
class BaseContainerIterator(SimpleIteratorType):
    """
    Convenience base class for some container iterators.

    Derived classes must implement the *container_class* attribute.
    """

    def __init__(self, container):
        assert isinstance(container, self.container_class), container
        self.container = container
        # The iterator yields the container's element type.
        yield_type = container.dtype
        name = "iter(%s)" % container
        super(BaseContainerIterator, self).__init__(name, yield_type)

    def unify(self, typingctx, other):
        # Two iterators of the same concrete class unify iff their
        # containers unify.
        cls = type(self)
        if isinstance(other, cls):
            container = typingctx.unify_pairs(self.container, other.container)
            if container is not None:
                return cls(container)

    @property
    def key(self):
        return self.container
76
+
77
+
78
class BaseContainerPayload(Type):
    """
    Convenience base class for some container payloads.

    Derived classes must implement the *container_class* attribute.
    """

    def __init__(self, container):
        assert isinstance(container, self.container_class)
        self.container = container
        name = "payload(%s)" % container
        super(BaseContainerPayload, self).__init__(name)

    @property
    def key(self):
        return self.container
94
+
95
+
96
class Bytes(Buffer):
    """
    Type class for Python 3.x bytes objects.
    """

    mutable = False
    # Actually true but doesn't matter since bytes is immutable
    slice_is_copy = False


class ByteArray(Buffer):
    """
    Type class for bytearray objects.
    """

    # Slicing a bytearray yields a new copy, not a view.
    slice_is_copy = True


class PyArray(Buffer):
    """
    Type class for array.array objects.
    """

    slice_is_copy = True


class MemoryView(Buffer):
    """
    Type class for memoryview objects.
    """
126
+
127
+
128
def is_homogeneous(*tys):
    """Return whether every given type equals the first one.

    An empty argument list is considered non-homogeneous (False).
    """
    if not tys:
        # No types given: nothing to be homogeneous over.
        return False
    first = tys[0]
    # Use != (not ==) to match the elements' __ne__ semantics exactly.
    return not any(t != first for t in tys[1:])
137
+
138
+
139
class BaseTuple(ConstSized, Hashable):
    """
    The base class for all tuple types (with a known size).
    """

    @classmethod
    def from_types(cls, tys, pyclass=None):
        """
        Instantiate the right tuple type for the given element types.
        """
        if pyclass is not None and pyclass is not tuple:
            # A subclass => is it a namedtuple?
            assert issubclass(pyclass, tuple)
            if hasattr(pyclass, "_asdict"):
                tys = tuple(map(unliteral, tys))
                homogeneous = is_homogeneous(*tys)
                if homogeneous:
                    return NamedUniTuple(tys[0], len(tys), pyclass)
                else:
                    return NamedTuple(tys, pyclass)
            # NOTE(review): a tuple subclass without _asdict falls through
            # and returns None — confirm callers never pass such a class.
        else:
            dtype = utils.unified_function_type(tys)
            if dtype is not None:
                # All elements are function types unifying to one dtype.
                return UniTuple(dtype, len(tys))
            # non-named tuple
            homogeneous = is_homogeneous(*tys)
            if homogeneous:
                return cls._make_homogeneous_tuple(tys[0], len(tys))
            else:
                return cls._make_heterogeneous_tuple(tys)

    @classmethod
    def _make_homogeneous_tuple(cls, dtype, count):
        return UniTuple(dtype, count)

    @classmethod
    def _make_heterogeneous_tuple(cls, tys):
        return Tuple(tys)
177
+
178
+
179
class BaseAnonymousTuple(BaseTuple):
    """
    Mixin for non-named tuples.
    """

    def can_convert_to(self, typingctx, other):
        """
        Convert this tuple to another one. Note named tuples are rejected.
        """
        if not isinstance(other, BaseAnonymousTuple):
            return
        if len(self) != len(other):
            return
        if len(self) == 0:
            return Conversion.safe
        if isinstance(other, BaseTuple):
            kinds = [
                typingctx.can_convert(ta, tb) for ta, tb in zip(self, other)
            ]
            if any(kind is None for kind in kinds):
                return
            # Overall conversion kind is the weakest element conversion.
            return max(kinds)

    def __unliteral__(self):
        return type(self).from_types([unliteral(t) for t in self])
204
+
205
+
206
class _HomogeneousTuple(Sequence, BaseTuple):
    # Shared behaviour for tuples whose elements all have the same dtype.

    @property
    def iterator_type(self):
        return UniTupleIter(self)

    def __getitem__(self, i):
        """
        Return element at position i
        """
        return self.dtype

    def __iter__(self):
        return iter([self.dtype] * self.count)

    def __len__(self):
        return self.count

    @property
    def types(self):
        return (self.dtype,) * self.count
226
+
227
+
228
class UniTuple(BaseAnonymousTuple, _HomogeneousTuple, Sequence):
    """
    Type class for homogeneous tuples.
    """

    def __init__(self, dtype, count):
        self.dtype = dtype
        self.count = count
        name = "%s(%s x %d)" % (self.__class__.__name__, dtype, count,)
        super(UniTuple, self).__init__(name)

    @property
    def mangling_args(self):
        return self.__class__.__name__, (self.dtype, self.count)

    @property
    def key(self):
        return self.dtype, self.count

    def unify(self, typingctx, other):
        """
        Unify UniTuples with their dtype
        """
        if isinstance(other, UniTuple) and len(self) == len(other):
            dtype = typingctx.unify_pairs(self.dtype, other.dtype)
            if dtype is not None:
                return UniTuple(dtype=dtype, count=self.count)

    def __unliteral__(self):
        return type(self)(dtype=unliteral(self.dtype), count=self.count)

    def __repr__(self):
        return f"UniTuple({repr(self.dtype)}, {self.count})"
261
+
262
+
263
class UniTupleIter(BaseContainerIterator):
    """
    Type class for homogeneous tuple iterators.
    """

    # Constrains the iterated container to homogeneous tuples.
    container_class = _HomogeneousTuple
269
+
270
+
271
class _HeterogeneousTuple(BaseTuple):
    # Shared behaviour for tuples storing one type per element in *types*.

    def __getitem__(self, i):
        """
        Return element at position i
        """
        return self.types[i]

    def __len__(self):
        # Beware: this makes Tuple(()) false-ish
        return len(self.types)

    def __iter__(self):
        return iter(self.types)

    @staticmethod
    def is_types_iterable(types):
        # issue 4463 - check if argument 'types' is iterable
        if not isinstance(types, Iterable):
            raise TypingError("Argument 'types' is not iterable")
290
+
291
+
292
class UnionType(Type):
    # A union over several member types.  Members are de-duplicated and
    # sorted by name so equal sets of types produce the same UnionType.
    def __init__(self, types):
        self.types = tuple(sorted(set(types), key=lambda x: x.name))
        name = "Union[{}]".format(",".join(map(str, self.types)))
        super(UnionType, self).__init__(name=name)

    def get_type_tag(self, typ):
        # Integer tag identifying *typ* within this union's member list.
        return self.types.index(typ)
300
+
301
+
302
class Tuple(BaseAnonymousTuple, _HeterogeneousTuple):
    def __new__(cls, types):
        # If all member types unify to a single precise function type,
        # collapse to a homogeneous UniTuple instead.
        t = utils.unified_function_type(types, require_precise=True)
        if t is not None:
            return UniTuple(dtype=t, count=len(types))

        _HeterogeneousTuple.is_types_iterable(types)

        if types and all(t == types[0] for t in types[1:]):
            # All element types equal: return a UniTuple (a non-Tuple
            # instance, so Tuple.__init__ below is not invoked on it).
            return UniTuple(dtype=types[0], count=len(types))
        else:
            return object.__new__(Tuple)

    def __init__(self, types):
        self.types = tuple(types)
        self.count = len(self.types)
        self.dtype = UnionType(types)
        name = "%s(%s)" % (
            self.__class__.__name__,
            ", ".join(str(i) for i in self.types),
        )
        super(Tuple, self).__init__(name)

    @property
    def mangling_args(self):
        return self.__class__.__name__, tuple(t for t in self.types)

    @property
    def key(self):
        return self.types

    def unify(self, typingctx, other):
        """
        Unify elements of Tuples/UniTuples
        """
        # Other is UniTuple or Tuple
        if isinstance(other, BaseTuple) and len(self) == len(other):
            unified = [
                typingctx.unify_pairs(ta, tb) for ta, tb in zip(self, other)
            ]

            if all(t is not None for t in unified):
                return Tuple(unified)

    def __repr__(self):
        return f"Tuple({tuple(ty for ty in self.types)})"
349
+
350
+
351
class _StarArgTupleMixin:
    # Redirects the BaseTuple factory hooks to the StarArg variants.

    @classmethod
    def _make_homogeneous_tuple(cls, dtype, count):
        return StarArgUniTuple(dtype, count)

    @classmethod
    def _make_heterogeneous_tuple(cls, tys):
        return StarArgTuple(tys)


class StarArgTuple(_StarArgTupleMixin, Tuple):
    """To distinguish from Tuple() used as argument to a `*args`.
    """

    def __new__(cls, types):
        _HeterogeneousTuple.is_types_iterable(types)

        # Collapse to the homogeneous star-arg variant when possible.
        if types and all(t == types[0] for t in types[1:]):
            return StarArgUniTuple(dtype=types[0], count=len(types))
        else:
            return object.__new__(StarArgTuple)


class StarArgUniTuple(_StarArgTupleMixin, UniTuple):
    """To distinguish from UniTuple() used as argument to a `*args`.
    """
377
+
378
+
379
+ class BaseNamedTuple(BaseTuple):
380
+ pass
381
+
382
+
383
+ class NamedUniTuple(_HomogeneousTuple, BaseNamedTuple):
384
+ def __init__(self, dtype, count, cls):
385
+ self.dtype = dtype
386
+ self.count = count
387
+ self.fields = tuple(cls._fields)
388
+ self.instance_class = cls
389
+ name = "%s(%s x %d)" % (cls.__name__, dtype, count)
390
+ super(NamedUniTuple, self).__init__(name)
391
+
392
+ @property
393
+ def iterator_type(self):
394
+ return UniTupleIter(self)
395
+
396
+ @property
397
+ def key(self):
398
+ return self.instance_class, self.dtype, self.count
399
+
400
+
401
+ class NamedTuple(_HeterogeneousTuple, BaseNamedTuple):
402
+ def __init__(self, types, cls):
403
+ _HeterogeneousTuple.is_types_iterable(types)
404
+
405
+ self.types = tuple(types)
406
+ self.count = len(self.types)
407
+ self.fields = tuple(cls._fields)
408
+ self.instance_class = cls
409
+ name = "%s(%s)" % (cls.__name__, ", ".join(str(i) for i in self.types))
410
+ super(NamedTuple, self).__init__(name)
411
+
412
+ @property
413
+ def key(self):
414
+ return self.instance_class, self.types
415
+
416
+
417
+ class List(MutableSequence, InitialValue):
418
+ """
419
+ Type class for (arbitrary-sized) homogeneous lists.
420
+ """
421
+
422
+ def __init__(self, dtype, reflected=False, initial_value=None):
423
+ dtype = unliteral(dtype)
424
+ self.dtype = dtype
425
+ self.reflected = reflected
426
+ cls_name = "reflected list" if reflected else "list"
427
+ name = "%s(%s)<iv=%s>" % (cls_name, self.dtype, initial_value)
428
+ super(List, self).__init__(name=name)
429
+ InitialValue.__init__(self, initial_value)
430
+
431
+ def copy(self, dtype=None, reflected=None):
432
+ if dtype is None:
433
+ dtype = self.dtype
434
+ if reflected is None:
435
+ reflected = self.reflected
436
+ return List(dtype, reflected, self.initial_value)
437
+
438
+ def unify(self, typingctx, other):
439
+ if isinstance(other, List):
440
+ dtype = typingctx.unify_pairs(self.dtype, other.dtype)
441
+ reflected = self.reflected or other.reflected
442
+ if dtype is not None:
443
+ siv = self.initial_value
444
+ oiv = other.initial_value
445
+ if siv is not None and oiv is not None:
446
+ use = siv
447
+ if siv is None:
448
+ use = oiv
449
+ return List(dtype, reflected, use)
450
+ else:
451
+ return List(dtype, reflected)
452
+
453
+ @property
454
+ def key(self):
455
+ return self.dtype, self.reflected, str(self.initial_value)
456
+
457
+ @property
458
+ def iterator_type(self):
459
+ return ListIter(self)
460
+
461
+ def is_precise(self):
462
+ return self.dtype.is_precise()
463
+
464
+ def __getitem__(self, args):
465
+ """
466
+ Overrides the default __getitem__ from Type.
467
+ """
468
+ return self.dtype
469
+
470
+ def __unliteral__(self):
471
+ return List(self.dtype, reflected=self.reflected,
472
+ initial_value=None)
473
+
474
+ def __repr__(self):
475
+ return f"List({self.dtype}, {self.reflected})"
476
+
477
+
478
+ class LiteralList(Literal, ConstSized, Hashable):
479
+ """A heterogeneous immutable list (basically a tuple with list semantics).
480
+ """
481
+
482
+ mutable = False
483
+
484
+ def __init__(self, literal_value):
485
+ self.is_types_iterable(literal_value)
486
+ self._literal_init(list(literal_value))
487
+ self.types = tuple(literal_value)
488
+ self.count = len(self.types)
489
+ self.name = "LiteralList({})".format(literal_value)
490
+
491
+ def __getitem__(self, i):
492
+ """
493
+ Return element at position i
494
+ """
495
+ return self.types[i]
496
+
497
+ def __len__(self):
498
+ return len(self.types)
499
+
500
+ def __iter__(self):
501
+ return iter(self.types)
502
+
503
+ @classmethod
504
+ def from_types(cls, tys):
505
+ return LiteralList(tys)
506
+
507
+ @staticmethod
508
+ def is_types_iterable(types):
509
+ if not isinstance(types, Iterable):
510
+ raise TypingError("Argument 'types' is not iterable")
511
+
512
+ @property
513
+ def iterator_type(self):
514
+ return ListIter(self)
515
+
516
+ def __unliteral__(self):
517
+ return Poison(self)
518
+
519
+ def unify(self, typingctx, other):
520
+ """
521
+ Unify this with the *other* one.
522
+ """
523
+ if isinstance(other, LiteralList) and self.count == other.count:
524
+ tys = []
525
+ for i1, i2 in zip(self.types, other.types):
526
+ tys.append(typingctx.unify_pairs(i1, i2))
527
+ if all(tys):
528
+ return LiteralList(tys)
529
+
530
+
531
+ class ListIter(BaseContainerIterator):
532
+ """
533
+ Type class for list iterators.
534
+ """
535
+
536
+ container_class = List
537
+
538
+
539
+ class ListPayload(BaseContainerPayload):
540
+ """
541
+ Internal type class for the dynamically-allocated payload of a list.
542
+ """
543
+
544
+ container_class = List
545
+
546
+
547
+ class Set(Container):
548
+ """
549
+ Type class for homogeneous sets.
550
+ """
551
+
552
+ mutable = True
553
+
554
+ def __init__(self, dtype, reflected=False):
555
+ assert isinstance(dtype, (Hashable, Undefined))
556
+ self.dtype = dtype
557
+ self.reflected = reflected
558
+ cls_name = "reflected set" if reflected else "set"
559
+ name = "%s(%s)" % (cls_name, self.dtype)
560
+ super(Set, self).__init__(name=name)
561
+
562
+ @property
563
+ def key(self):
564
+ return self.dtype, self.reflected
565
+
566
+ @property
567
+ def iterator_type(self):
568
+ return SetIter(self)
569
+
570
+ def is_precise(self):
571
+ return self.dtype.is_precise()
572
+
573
+ def copy(self, dtype=None, reflected=None):
574
+ if dtype is None:
575
+ dtype = self.dtype
576
+ if reflected is None:
577
+ reflected = self.reflected
578
+ return Set(dtype, reflected)
579
+
580
+ def unify(self, typingctx, other):
581
+ if isinstance(other, Set):
582
+ dtype = typingctx.unify_pairs(self.dtype, other.dtype)
583
+ reflected = self.reflected or other.reflected
584
+ if dtype is not None:
585
+ return Set(dtype, reflected)
586
+
587
+ def __repr__(self):
588
+ return f"Set({self.dtype}, {self.reflected})"
589
+
590
+
591
+ class SetIter(BaseContainerIterator):
592
+ """
593
+ Type class for set iterators.
594
+ """
595
+
596
+ container_class = Set
597
+
598
+
599
+ class SetPayload(BaseContainerPayload):
600
+ """
601
+ Internal type class for the dynamically-allocated payload of a set.
602
+ """
603
+
604
+ container_class = Set
605
+
606
+
607
+ class SetEntry(Type):
608
+ """
609
+ Internal type class for the entries of a Set's hash table.
610
+ """
611
+
612
+ def __init__(self, set_type):
613
+ self.set_type = set_type
614
+ name = "entry(%s)" % set_type
615
+ super(SetEntry, self).__init__(name)
616
+
617
+ @property
618
+ def key(self):
619
+ return self.set_type
620
+
621
+
622
+ class ListType(IterableType):
623
+ """List type
624
+ """
625
+
626
+ mutable = True
627
+
628
+ def __init__(self, itemty):
629
+ assert not isinstance(itemty, TypeRef)
630
+ itemty = unliteral(itemty)
631
+ if isinstance(itemty, Optional):
632
+ fmt = "List.item_type cannot be of type {}"
633
+ raise TypingError(fmt.format(itemty))
634
+ # FIXME: _sentry_forbidden_types(itemty)
635
+ self.item_type = itemty
636
+ self.dtype = itemty
637
+ name = "{}[{}]".format(self.__class__.__name__, itemty,)
638
+ super(ListType, self).__init__(name)
639
+
640
+ @property
641
+ def key(self):
642
+ return self.item_type
643
+
644
+ def is_precise(self):
645
+ return not isinstance(self.item_type, Undefined)
646
+
647
+ @property
648
+ def iterator_type(self):
649
+ return ListTypeIterableType(self).iterator_type
650
+
651
+ @classmethod
652
+ def refine(cls, itemty):
653
+ """Refine to a precise list type
654
+ """
655
+ res = cls(itemty)
656
+ assert res.is_precise()
657
+ return res
658
+
659
+ def unify(self, typingctx, other):
660
+ """
661
+ Unify this with the *other* list.
662
+ """
663
+ # If other is list
664
+ if isinstance(other, ListType):
665
+ if not other.is_precise():
666
+ return self
667
+
668
+ def __repr__(self):
669
+ return f"ListType({self.item_type})"
670
+
671
+
672
+ class ListTypeIterableType(SimpleIterableType):
673
+ """List iterable type
674
+ """
675
+
676
+ def __init__(self, parent):
677
+ assert isinstance(parent, ListType)
678
+ self.parent = parent
679
+ self.yield_type = self.parent.item_type
680
+ name = "list[{}]".format(self.parent.name)
681
+ iterator_type = ListTypeIteratorType(self)
682
+ super(ListTypeIterableType, self).__init__(name, iterator_type)
683
+
684
+
685
+ class ListTypeIteratorType(SimpleIteratorType):
686
+ def __init__(self, iterable):
687
+ self.parent = iterable.parent
688
+ self.iterable = iterable
689
+ yield_type = iterable.yield_type
690
+ name = "iter[{}->{}]".format(iterable.parent, yield_type)
691
+ super(ListTypeIteratorType, self).__init__(name, yield_type)
692
+
693
+
694
+ def _sentry_forbidden_types(key, value):
695
+ # Forbids List and Set for now
696
+ if isinstance(key, (Set, List)):
697
+ raise TypingError("{} as key is forbidden".format(key))
698
+ if isinstance(value, (Set, List)):
699
+ raise TypingError("{} as value is forbidden".format(value))
700
+
701
+
702
+ class DictType(IterableType, InitialValue):
703
+ """Dictionary type
704
+ """
705
+
706
+ def __init__(self, keyty, valty, initial_value=None):
707
+ assert not isinstance(keyty, TypeRef)
708
+ assert not isinstance(valty, TypeRef)
709
+ keyty = unliteral(keyty)
710
+ valty = unliteral(valty)
711
+ if isinstance(keyty, (Optional, NoneType)):
712
+ fmt = "Dict.key_type cannot be of type {}"
713
+ raise TypingError(fmt.format(keyty))
714
+ if isinstance(valty, (Optional, NoneType)):
715
+ fmt = "Dict.value_type cannot be of type {}"
716
+ raise TypingError(fmt.format(valty))
717
+ _sentry_forbidden_types(keyty, valty)
718
+ self.key_type = keyty
719
+ self.value_type = valty
720
+ self.keyvalue_type = Tuple([keyty, valty])
721
+ name = "{}[{},{}]<iv={}>".format(
722
+ self.__class__.__name__, keyty, valty, initial_value
723
+ )
724
+ super(DictType, self).__init__(name)
725
+ InitialValue.__init__(self, initial_value)
726
+
727
+ def is_precise(self):
728
+ return not any(
729
+ (
730
+ isinstance(self.key_type, Undefined),
731
+ isinstance(self.value_type, Undefined),
732
+ )
733
+ )
734
+
735
+ @property
736
+ def iterator_type(self):
737
+ return DictKeysIterableType(self).iterator_type
738
+
739
+ @classmethod
740
+ def refine(cls, keyty, valty):
741
+ """Refine to a precise dictionary type
742
+ """
743
+ res = cls(keyty, valty)
744
+ assert res.is_precise()
745
+ return res
746
+
747
+ def unify(self, typingctx, other):
748
+ """
749
+ Unify this with the *other* dictionary.
750
+ """
751
+ # If other is dict
752
+ if isinstance(other, DictType):
753
+ if not other.is_precise():
754
+ return self
755
+ else:
756
+ ukey_type = self.key_type == other.key_type
757
+ uvalue_type = self.value_type == other.value_type
758
+ if ukey_type and uvalue_type:
759
+ siv = self.initial_value
760
+ oiv = other.initial_value
761
+ siv_none = siv is None
762
+ oiv_none = oiv is None
763
+ if not siv_none and not oiv_none:
764
+ if siv == oiv:
765
+ return DictType(self.key_type, other.value_type,
766
+ siv)
767
+ return DictType(self.key_type, other.value_type)
768
+
769
+ @property
770
+ def key(self):
771
+ return self.key_type, self.value_type, str(self.initial_value)
772
+
773
+ def __unliteral__(self):
774
+ return DictType(self.key_type, self.value_type)
775
+
776
+ def __repr__(self):
777
+ return f"DictType({self.key_type}, {self.value_type})"
778
+
779
+
780
+ class LiteralStrKeyDict(Literal, ConstSized, Hashable):
781
+ """A Dictionary of string keys to heterogeneous values (basically a
782
+ namedtuple with dict semantics).
783
+ """
784
+
785
+ class FakeNamedTuple(pySequence):
786
+ # This is namedtuple-like and is a workaround for #6518 and #7416.
787
+ # This has the couple of namedtuple properties that are used by Numba's
788
+ # internals but avoids use of an actual namedtuple as it cannot have
789
+ # numeric field names, i.e. `namedtuple('foo', '0 1')` is invalid.
790
+ def __init__(self, name, keys):
791
+ self.__name__ = name
792
+ self._fields = tuple(keys)
793
+ super(LiteralStrKeyDict.FakeNamedTuple, self).__init__()
794
+
795
+ def __len__(self):
796
+ return len(self._fields)
797
+
798
+ def __getitem__(self, key):
799
+ return self._fields[key]
800
+
801
+ mutable = False
802
+
803
+ def __init__(self, literal_value, value_index=None):
804
+ self._literal_init(literal_value)
805
+ self.value_index = value_index
806
+ strkeys = [x.literal_value for x in literal_value.keys()]
807
+ self.tuple_ty = self.FakeNamedTuple("_ntclazz", strkeys)
808
+ tys = [x for x in literal_value.values()]
809
+ self.types = tuple(tys)
810
+ self.count = len(self.types)
811
+ self.fields = tuple(self.tuple_ty._fields)
812
+ self.instance_class = self.tuple_ty
813
+ self.name = "LiteralStrKey[Dict]({})".format(literal_value)
814
+
815
+ def __unliteral__(self):
816
+ return Poison(self)
817
+
818
+ def unify(self, typingctx, other):
819
+ """
820
+ Unify this with the *other* one.
821
+ """
822
+ if isinstance(other, LiteralStrKeyDict):
823
+ tys = []
824
+ for (k1, v1), (k2, v2) in zip(
825
+ self.literal_value.items(), other.literal_value.items()
826
+ ):
827
+ if k1 != k2: # keys must be same
828
+ break
829
+ tys.append(typingctx.unify_pairs(v1, v2))
830
+ else:
831
+ if all(tys):
832
+ d = {k: v for k, v in zip(self.literal_value.keys(), tys)}
833
+ return LiteralStrKeyDict(d)
834
+
835
+ def __len__(self):
836
+ return len(self.types)
837
+
838
+ def __iter__(self):
839
+ return iter(self.types)
840
+
841
+ @property
842
+ def key(self):
843
+ # use the namedtuple fields not the namedtuple itself as it's created
844
+ # locally in the ctor and comparison would always be False.
845
+ return self.tuple_ty._fields, self.types, str(self.literal_value)
846
+
847
+
848
+ class DictItemsIterableType(SimpleIterableType):
849
+ """Dictionary iterable type for .items()
850
+ """
851
+
852
+ def __init__(self, parent):
853
+ assert isinstance(parent, DictType)
854
+ self.parent = parent
855
+ self.yield_type = self.parent.keyvalue_type
856
+ name = "items[{}]".format(self.parent.name)
857
+ self.name = name
858
+ iterator_type = DictIteratorType(self)
859
+ super(DictItemsIterableType, self).__init__(name, iterator_type)
860
+
861
+
862
+ class DictKeysIterableType(SimpleIterableType):
863
+ """Dictionary iterable type for .keys()
864
+ """
865
+
866
+ def __init__(self, parent):
867
+ assert isinstance(parent, DictType)
868
+ self.parent = parent
869
+ self.yield_type = self.parent.key_type
870
+ name = "keys[{}]".format(self.parent.name)
871
+ self.name = name
872
+ iterator_type = DictIteratorType(self)
873
+ super(DictKeysIterableType, self).__init__(name, iterator_type)
874
+
875
+
876
+ class DictValuesIterableType(SimpleIterableType):
877
+ """Dictionary iterable type for .values()
878
+ """
879
+
880
+ def __init__(self, parent):
881
+ assert isinstance(parent, DictType)
882
+ self.parent = parent
883
+ self.yield_type = self.parent.value_type
884
+ name = "values[{}]".format(self.parent.name)
885
+ self.name = name
886
+ iterator_type = DictIteratorType(self)
887
+ super(DictValuesIterableType, self).__init__(name, iterator_type)
888
+
889
+
890
+ class DictIteratorType(SimpleIteratorType):
891
+ def __init__(self, iterable):
892
+ self.parent = iterable.parent
893
+ self.iterable = iterable
894
+ yield_type = iterable.yield_type
895
+ name = "iter[{}->{}],{}".format(
896
+ iterable.parent, yield_type, iterable.name
897
+ )
898
+ super(DictIteratorType, self).__init__(name, yield_type)
899
+
900
+
901
+ class StructRef(Type):
902
+ """A mutable struct.
903
+ """
904
+
905
+ def __init__(self, fields):
906
+ """
907
+ Parameters
908
+ ----------
909
+ fields : Sequence
910
+ A sequence of field descriptions, which is a 2-tuple-like object
911
+ containing `(name, type)`, where `name` is a `str` for the field
912
+ name, and `type` is a numba type for the field type.
913
+ """
914
+
915
+ def check_field_pair(fieldpair):
916
+ name, typ = fieldpair
917
+ if not isinstance(name, str):
918
+ msg = "expecting a str for field name"
919
+ raise ValueError(msg)
920
+ if not isinstance(typ, Type):
921
+ msg = "expecting a Numba Type for field type"
922
+ raise ValueError(msg)
923
+ return name, typ
924
+
925
+ fields = tuple(map(check_field_pair, fields))
926
+ self._fields = tuple(map(check_field_pair,
927
+ self.preprocess_fields(fields)))
928
+ self._typename = self.__class__.__qualname__
929
+ name = f"numba.{self._typename}{self._fields}"
930
+ super().__init__(name=name)
931
+
932
+ def preprocess_fields(self, fields):
933
+ """Subclasses can override this to do additional clean up on fields.
934
+
935
+ The default is an identity function.
936
+
937
+ Parameters:
938
+ -----------
939
+ fields : Sequence[Tuple[str, Type]]
940
+ """
941
+ return fields
942
+
943
+ @property
944
+ def field_dict(self):
945
+ """Return an immutable mapping for the field names and their
946
+ corresponding types.
947
+ """
948
+ return MappingProxyType(dict(self._fields))
949
+
950
+ def get_data_type(self):
951
+ """Get the payload type for the actual underlying structure referred
952
+ to by this struct reference.
953
+
954
+ See also: `ClassInstanceType.get_data_type`
955
+ """
956
+ return StructRefPayload(
957
+ typename=self.__class__.__name__, fields=self._fields,
958
+ )
959
+
960
+
961
+ class StructRefPayload(Type):
962
+ """The type of the payload of a mutable struct.
963
+ """
964
+
965
+ mutable = True
966
+
967
+ def __init__(self, typename, fields):
968
+ self._typename = typename
969
+ self._fields = tuple(fields)
970
+ super().__init__(name=f"numba.{typename}{self._fields}.payload")
971
+
972
+ @property
973
+ def field_dict(self):
974
+ return MappingProxyType(dict(self._fields))
lib/python3.10/site-packages/numba/core/types/function_type.py ADDED
@@ -0,0 +1,211 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ __all__ = ['FunctionType', 'UndefinedFunctionType', 'FunctionPrototype',
3
+ 'WrapperAddressProtocol', 'CompileResultWAP']
4
+
5
+ from abc import ABC, abstractmethod
6
+ from .abstract import Type
7
+ from .. import types, errors
8
+
9
+
10
+ class FunctionType(Type):
11
+ """
12
+ First-class function type.
13
+ """
14
+
15
+ cconv = None
16
+
17
+ def __init__(self, signature):
18
+ sig = types.unliteral(signature)
19
+ self.nargs = len(sig.args)
20
+ self.signature = sig
21
+ self.ftype = FunctionPrototype(sig.return_type, sig.args)
22
+ self._key = self.ftype.key
23
+
24
+ @property
25
+ def key(self):
26
+ return self._key
27
+
28
+ @property
29
+ def name(self):
30
+ return f'{type(self).__name__}[{self.key}]'
31
+
32
+ def is_precise(self):
33
+ return self.signature.is_precise()
34
+
35
+ def get_precise(self):
36
+ return self
37
+
38
+ def dump(self, tab=''):
39
+ print(f'{tab}DUMP {type(self).__name__}[code={self._code}]')
40
+ self.signature.dump(tab=tab + ' ')
41
+ print(f'{tab}END DUMP {type(self).__name__}')
42
+
43
+ def get_call_type(self, context, args, kws):
44
+ from numba.core import typing
45
+
46
+ if kws:
47
+ # First-class functions carry only the type signature
48
+ # information and function address value. So, it is not
49
+ # possible to determine the positional arguments
50
+ # corresponding to the keyword arguments in the call
51
+ # expression. For instance, the definition of the
52
+ # first-class function may not use the same argument names
53
+ # that the caller assumes. [numba/issues/5540].
54
+ raise errors.UnsupportedError(
55
+ 'first-class function call cannot use keyword arguments')
56
+
57
+ if len(args) != self.nargs:
58
+ raise ValueError(
59
+ f'mismatch of arguments number: {len(args)} vs {self.nargs}')
60
+
61
+ sig = self.signature
62
+
63
+ # check that arguments types match with the signature types exactly
64
+ for atype, sig_atype in zip(args, sig.args):
65
+ atype = types.unliteral(atype)
66
+ if sig_atype.is_precise():
67
+ conv_score = context.context.can_convert(
68
+ fromty=atype, toty=sig_atype
69
+ )
70
+ if conv_score is None \
71
+ or conv_score > typing.context.Conversion.safe:
72
+ raise ValueError(
73
+ f'mismatch of argument types: {atype} vs {sig_atype}')
74
+
75
+ if not sig.is_precise():
76
+ for dispatcher in self.dispatchers:
77
+ template, pysig, args, kws \
78
+ = dispatcher.get_call_template(args, kws)
79
+ new_sig = template(context.context).apply(args, kws)
80
+ return types.unliteral(new_sig)
81
+
82
+ return sig
83
+
84
+ def check_signature(self, other_sig):
85
+ """Return True if signatures match (up to being precise).
86
+ """
87
+ sig = self.signature
88
+ return (self.nargs == len(other_sig.args)
89
+ and (sig == other_sig or not sig.is_precise()))
90
+
91
+ def unify(self, context, other):
92
+ if isinstance(other, types.UndefinedFunctionType) \
93
+ and self.nargs == other.nargs:
94
+ return self
95
+
96
+
97
+ class UndefinedFunctionType(FunctionType):
98
+
99
+ _counter = 0
100
+
101
+ def __init__(self, nargs, dispatchers):
102
+ from numba.core.typing.templates import Signature
103
+ signature = Signature(types.undefined,
104
+ (types.undefined,) * nargs, recvr=None)
105
+
106
+ super(UndefinedFunctionType, self).__init__(signature)
107
+
108
+ self.dispatchers = dispatchers
109
+
110
+ # make the undefined function type instance unique
111
+ type(self)._counter += 1
112
+ self._key += str(type(self)._counter)
113
+
114
+ def get_precise(self):
115
+ """
116
+ Return precise function type if possible.
117
+ """
118
+ for dispatcher in self.dispatchers:
119
+ for cres in dispatcher.overloads.values():
120
+ sig = types.unliteral(cres.signature)
121
+ return FunctionType(sig)
122
+ return self
123
+
124
+
125
+ class FunctionPrototype(Type):
126
+ """
127
+ Represents the prototype of a first-class function type.
128
+ Used internally.
129
+ """
130
+ cconv = None
131
+
132
+ def __init__(self, rtype, atypes):
133
+ self.rtype = rtype
134
+ self.atypes = tuple(atypes)
135
+
136
+ assert isinstance(rtype, Type), (rtype)
137
+ lst = []
138
+ for atype in self.atypes:
139
+ assert isinstance(atype, Type), (atype)
140
+ lst.append(atype.name)
141
+ name = '%s(%s)' % (rtype, ', '.join(lst))
142
+
143
+ super(FunctionPrototype, self).__init__(name)
144
+
145
+ @property
146
+ def key(self):
147
+ return self.name
148
+
149
+
150
+ class WrapperAddressProtocol(ABC):
151
+ """Base class for Wrapper Address Protocol.
152
+
153
+ Objects that inherit from the WrapperAddressProtocol can be passed
154
+ as arguments to Numba jit compiled functions where it can be used
155
+ as first-class functions. As a minimum, the derived types must
156
+ implement two methods ``__wrapper_address__`` and ``signature``.
157
+ """
158
+
159
+ @abstractmethod
160
+ def __wrapper_address__(self):
161
+ """Return the address of a first-class function.
162
+
163
+ Returns
164
+ -------
165
+ addr : int
166
+ """
167
+
168
+ @abstractmethod
169
+ def signature(self):
170
+ """Return the signature of a first-class function.
171
+
172
+ Returns
173
+ -------
174
+ sig : Signature
175
+ The returned Signature instance represents the type of a
176
+ first-class function that the given WrapperAddressProtocol
177
+ instance represents.
178
+ """
179
+
180
+
181
+ class CompileResultWAP(WrapperAddressProtocol):
182
+ """Wrapper of dispatcher instance compilation result to turn it a
183
+ first-class function.
184
+ """
185
+
186
+ def __init__(self, cres):
187
+ """
188
+ Parameters
189
+ ----------
190
+ cres : CompileResult
191
+ Specify compilation result of a Numba jit-decorated function
192
+ (that is a value of dispatcher instance ``overloads``
193
+ attribute)
194
+ """
195
+ self.cres = cres
196
+ name = getattr(cres.fndesc, 'llvm_cfunc_wrapper_name')
197
+ self.address = cres.library.get_pointer_to_function(name)
198
+
199
+ def dump(self, tab=''):
200
+ print(f'{tab}DUMP {type(self).__name__} [addr={self.address}]')
201
+ self.cres.signature.dump(tab=tab + ' ')
202
+ print(f'{tab}END DUMP {type(self).__name__}')
203
+
204
+ def __wrapper_address__(self):
205
+ return self.address
206
+
207
+ def signature(self):
208
+ return self.cres.signature
209
+
210
+ def __call__(self, *args, **kwargs): # used in object-mode
211
+ return self.cres.entry_point(*args, **kwargs)
lib/python3.10/site-packages/numba/core/types/functions.py ADDED
@@ -0,0 +1,743 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import traceback
2
+ from collections import namedtuple, defaultdict
3
+ import itertools
4
+ import logging
5
+ import textwrap
6
+ from shutil import get_terminal_size
7
+
8
+ from .abstract import Callable, DTypeSpec, Dummy, Literal, Type, weakref
9
+ from .common import Opaque
10
+ from .misc import unliteral
11
+ from numba.core import errors, utils, types, config
12
+ from numba.core.typeconv import Conversion
13
+
14
+ _logger = logging.getLogger(__name__)
15
+
16
+
17
+ # terminal color markup
18
+ _termcolor = errors.termcolor()
19
+
20
+ _FAILURE = namedtuple('_FAILURE', 'template matched error literal')
21
+
22
+ _termwidth = get_terminal_size().columns
23
+
24
+
25
+ # pull out the lead line as unit tests often use this
26
+ _header_lead = "No implementation of function"
27
+ _header_template = (_header_lead + " {the_function} found for signature:\n \n "
28
+ ">>> {fname}({signature})\n \nThere are {ncandidates} "
29
+ "candidate implementations:")
30
+
31
+ _reason_template = """
32
+ " - Of which {nmatches} did not match due to:\n
33
+ """
34
+
35
+
36
+ def _wrapper(tmp, indent=0):
37
+ return textwrap.indent(tmp, ' ' * indent, lambda line: True)
38
+
39
+
40
+ _overload_template = ("- Of which {nduplicates} did not match due to:\n"
41
+ "{kind} {inof} function '{function}': File: {file}: "
42
+ "Line {line}.\n With argument(s): '({args})':")
43
+
44
+
45
+ _err_reasons = {'specific_error': "Rejected as the implementation raised a "
46
+ "specific error:\n{}"}
47
+
48
+
49
+ def _bt_as_lines(bt):
50
+ """
51
+ Converts a backtrace into a list of lines, squashes it a bit on the way.
52
+ """
53
+ return [y for y in itertools.chain(*[x.split('\n') for x in bt]) if y]
54
+
55
+
56
+ def argsnkwargs_to_str(args, kwargs):
57
+ buf = [str(a) for a in tuple(args)]
58
+ buf.extend(["{}={}".format(k, v) for k, v in kwargs.items()])
59
+ return ', '.join(buf)
60
+
61
+
62
+ class _ResolutionFailures(object):
63
+ """Collect and format function resolution failures.
64
+ """
65
+ def __init__(self, context, function_type, args, kwargs, depth=0):
66
+ self._context = context
67
+ self._function_type = function_type
68
+ self._args = args
69
+ self._kwargs = kwargs
70
+ self._failures = defaultdict(list)
71
+ self._depth = depth
72
+ self._max_depth = 5
73
+ self._scale = 2
74
+
75
+ def __len__(self):
76
+ return len(self._failures)
77
+
78
+ def add_error(self, calltemplate, matched, error, literal):
79
+ """
80
+ Args
81
+ ----
82
+ calltemplate : CallTemplate
83
+ error : Exception or str
84
+ Error message
85
+ """
86
+ isexc = isinstance(error, Exception)
87
+ errclazz = '%s: ' % type(error).__name__ if isexc else ''
88
+
89
+ key = "{}{}".format(errclazz, str(error))
90
+ self._failures[key].append(_FAILURE(calltemplate, matched, error,
91
+ literal))
92
+
93
+ def format(self):
94
+ """Return a formatted error message from all the gathered errors.
95
+ """
96
+ indent = ' ' * self._scale
97
+ argstr = argsnkwargs_to_str(self._args, self._kwargs)
98
+ ncandidates = sum([len(x) for x in self._failures.values()])
99
+
100
+ # sort out a display name for the function
101
+ tykey = self._function_type.typing_key
102
+ # most things have __name__
103
+ fname = getattr(tykey, '__name__', None)
104
+ is_external_fn_ptr = isinstance(self._function_type,
105
+ ExternalFunctionPointer)
106
+
107
+ if fname is None:
108
+ if is_external_fn_ptr:
109
+ fname = "ExternalFunctionPointer"
110
+ else:
111
+ fname = "<unknown function>"
112
+
113
+ msgbuf = [_header_template.format(the_function=self._function_type,
114
+ fname=fname,
115
+ signature=argstr,
116
+ ncandidates=ncandidates)]
117
+ nolitargs = tuple([unliteral(a) for a in self._args])
118
+ nolitkwargs = {k: unliteral(v) for k, v in self._kwargs.items()}
119
+ nolitargstr = argsnkwargs_to_str(nolitargs, nolitkwargs)
120
+
121
+ # depth could potentially get massive, so limit it.
122
+ ldepth = min(max(self._depth, 0), self._max_depth)
123
+
124
+ def template_info(tp):
125
+ src_info = tp.get_template_info()
126
+ unknown = "unknown"
127
+ source_name = src_info.get('name', unknown)
128
+ source_file = src_info.get('filename', unknown)
129
+ source_lines = src_info.get('lines', unknown)
130
+ source_kind = src_info.get('kind', 'Unknown template')
131
+ return source_name, source_file, source_lines, source_kind
132
+
133
+ for i, (k, err_list) in enumerate(self._failures.items()):
134
+ err = err_list[0]
135
+ nduplicates = len(err_list)
136
+ template, error = err.template, err.error
137
+ ifo = template_info(template)
138
+ source_name, source_file, source_lines, source_kind = ifo
139
+ largstr = argstr if err.literal else nolitargstr
140
+
141
+ if err.error == "No match.":
142
+ err_dict = defaultdict(set)
143
+ for errs in err_list:
144
+ err_dict[errs.template].add(errs.literal)
145
+ # if there's just one template, and it's erroring on
146
+ # literal/nonliteral be specific
147
+ if len(err_dict) == 1:
148
+ template = [_ for _ in err_dict.keys()][0]
149
+ source_name, source_file, source_lines, source_kind = \
150
+ template_info(template)
151
+ source_lines = source_lines[0]
152
+ else:
153
+ source_file = "<numerous>"
154
+ source_lines = "N/A"
155
+
156
+ msgbuf.append(_termcolor.errmsg(
157
+ _wrapper(_overload_template.format(nduplicates=nduplicates,
158
+ kind=source_kind.title(),
159
+ function=fname,
160
+ inof='of',
161
+ file=source_file,
162
+ line=source_lines,
163
+ args=largstr),
164
+ ldepth + 1)))
165
+ msgbuf.append(_termcolor.highlight(_wrapper(err.error,
166
+ ldepth + 2)))
167
+ else:
168
+ # There was at least one match in this failure class, but it
169
+ # failed for a specific reason try and report this.
170
+ msgbuf.append(_termcolor.errmsg(
171
+ _wrapper(_overload_template.format(nduplicates=nduplicates,
172
+ kind=source_kind.title(),
173
+ function=source_name,
174
+ inof='in',
175
+ file=source_file,
176
+ line=source_lines[0],
177
+ args=largstr),
178
+ ldepth + 1)))
179
+
180
+ if isinstance(error, BaseException):
181
+ reason = indent + self.format_error(error)
182
+ errstr = _err_reasons['specific_error'].format(reason)
183
+ else:
184
+ errstr = error
185
+ # if you are a developer, show the back traces
186
+ if config.DEVELOPER_MODE:
187
+ if isinstance(error, BaseException):
188
+ # if the error is an actual exception instance, trace it
189
+ bt = traceback.format_exception(type(error), error,
190
+ error.__traceback__)
191
+ else:
192
+ bt = [""]
193
+ bt_as_lines = _bt_as_lines(bt)
194
+ nd2indent = '\n{}'.format(2 * indent)
195
+ errstr += _termcolor.reset(nd2indent +
196
+ nd2indent.join(bt_as_lines))
197
+ msgbuf.append(_termcolor.highlight(_wrapper(errstr,
198
+ ldepth + 2)))
199
+ loc = self.get_loc(template, error)
200
+ if loc:
201
+ msgbuf.append('{}raised from {}'.format(indent, loc))
202
+
203
+ # the commented bit rewraps each block, may not be helpful?!
204
+ return _wrapper('\n'.join(msgbuf) + '\n') # , self._scale * ldepth)
205
+
206
+ def format_error(self, error):
207
+ """Format error message or exception
208
+ """
209
+ if isinstance(error, Exception):
210
+ return '{}: {}'.format(type(error).__name__, error)
211
+ else:
212
+ return '{}'.format(error)
213
+
214
+ def get_loc(self, classtemplate, error):
215
+ """Get source location information from the error message.
216
+ """
217
+ if isinstance(error, Exception) and hasattr(error, '__traceback__'):
218
+ # traceback is unavailable in py2
219
+ frame = traceback.extract_tb(error.__traceback__)[-1]
220
+ return "{}:{}".format(frame[0], frame[1])
221
+
222
+ def raise_error(self):
223
+ for faillist in self._failures.values():
224
+ for fail in faillist:
225
+ if isinstance(fail.error, errors.ForceLiteralArg):
226
+ raise fail.error
227
+ raise errors.TypingError(self.format())
228
+
229
+
230
+ def _unlit_non_poison(ty):
231
+ """Apply unliteral(ty) and raise a TypingError if type is Poison.
232
+ """
233
+ out = unliteral(ty)
234
+ if isinstance(out, types.Poison):
235
+ m = f"Poison type used in arguments; got {out}"
236
+ raise errors.TypingError(m)
237
+ return out
238
+
239
+
240
+ class BaseFunction(Callable):
241
+ """
242
+ Base type class for some function types.
243
+ """
244
+
245
+ def __init__(self, template):
246
+
247
+ if isinstance(template, (list, tuple)):
248
+ self.templates = tuple(template)
249
+ keys = set(temp.key for temp in self.templates)
250
+ if len(keys) != 1:
251
+ raise ValueError("incompatible templates: keys = %s"
252
+ % (keys,))
253
+ self.typing_key, = keys
254
+ else:
255
+ self.templates = (template,)
256
+ self.typing_key = template.key
257
+ self._impl_keys = {}
258
+ name = "%s(%s)" % (self.__class__.__name__, self.typing_key)
259
+ self._depth = 0
260
+ super(BaseFunction, self).__init__(name)
261
+
262
+ @property
263
+ def key(self):
264
+ return self.typing_key, self.templates
265
+
266
+ def augment(self, other):
267
+ """
268
+ Augment this function type with the other function types' templates,
269
+ so as to support more input types.
270
+ """
271
+ if type(other) is type(self) and other.typing_key == self.typing_key:
272
+ return type(self)(self.templates + other.templates)
273
+
274
+ def get_impl_key(self, sig):
275
+ """
276
+ Get the implementation key (used by the target context) for the
277
+ given signature.
278
+ """
279
+ return self._impl_keys[sig.args]
280
+
281
+ def get_call_type(self, context, args, kws):
282
+
283
+ prefer_lit = [True, False] # old behavior preferring literal
284
+ prefer_not = [False, True] # new behavior preferring non-literal
285
+ failures = _ResolutionFailures(context, self, args, kws,
286
+ depth=self._depth)
287
+
288
+ # get the order in which to try templates
289
+ from numba.core.target_extension import get_local_target # circular
290
+ target_hw = get_local_target(context)
291
+ order = utils.order_by_target_specificity(target_hw, self.templates,
292
+ fnkey=self.key[0])
293
+
294
+ self._depth += 1
295
+
296
+ for temp_cls in order:
297
+ temp = temp_cls(context)
298
+ # The template can override the default and prefer literal args
299
+ choice = prefer_lit if temp.prefer_literal else prefer_not
300
+ for uselit in choice:
301
+ try:
302
+ if uselit:
303
+ sig = temp.apply(args, kws)
304
+ else:
305
+ nolitargs = tuple([_unlit_non_poison(a) for a in args])
306
+ nolitkws = {k: _unlit_non_poison(v)
307
+ for k, v in kws.items()}
308
+ sig = temp.apply(nolitargs, nolitkws)
309
+ except Exception as e:
310
+ if not isinstance(e, errors.NumbaError):
311
+ raise e
312
+ sig = None
313
+ failures.add_error(temp, False, e, uselit)
314
+ else:
315
+ if sig is not None:
316
+ self._impl_keys[sig.args] = temp.get_impl_key(sig)
317
+ self._depth -= 1
318
+ return sig
319
+ else:
320
+ registered_sigs = getattr(temp, 'cases', None)
321
+ if registered_sigs is not None:
322
+ msg = "No match for registered cases:\n%s"
323
+ msg = msg % '\n'.join(" * {}".format(x) for x in
324
+ registered_sigs)
325
+ else:
326
+ msg = 'No match.'
327
+ failures.add_error(temp, True, msg, uselit)
328
+
329
+ failures.raise_error()
330
+
331
+ def get_call_signatures(self):
332
+ sigs = []
333
+ is_param = False
334
+ for temp in self.templates:
335
+ sigs += getattr(temp, 'cases', [])
336
+ is_param = is_param or hasattr(temp, 'generic')
337
+ return sigs, is_param
338
+
339
+
340
+ class Function(BaseFunction, Opaque):
341
+ """
342
+ Type class for builtin functions implemented by Numba.
343
+ """
344
+
345
+
346
+ class BoundFunction(Callable, Opaque):
347
+ """
348
+ A function with an implicit first argument (denoted as *this* below).
349
+ """
350
+
351
+ def __init__(self, template, this):
352
+ # Create a derived template with an attribute *this*
353
+ newcls = type(template.__name__ + '.' + str(this), (template,),
354
+ dict(this=this))
355
+ self.template = newcls
356
+ self.typing_key = self.template.key
357
+ self.this = this
358
+ name = "%s(%s for %s)" % (self.__class__.__name__,
359
+ self.typing_key, self.this)
360
+ super(BoundFunction, self).__init__(name)
361
+
362
+ def unify(self, typingctx, other):
363
+ if (isinstance(other, BoundFunction) and
364
+ self.typing_key == other.typing_key):
365
+ this = typingctx.unify_pairs(self.this, other.this)
366
+ if this is not None:
367
+ # XXX is it right that both template instances are distinct?
368
+ return self.copy(this=this)
369
+
370
+ def copy(self, this):
371
+ return type(self)(self.template, this)
372
+
373
+ @property
374
+ def key(self):
375
+ # FIXME: With target-overload, the MethodTemplate can change depending
376
+ # on the target.
377
+ unique_impl = getattr(self.template, "_overload_func", None)
378
+ return self.typing_key, self.this, unique_impl
379
+
380
+ def get_impl_key(self, sig):
381
+ """
382
+ Get the implementation key (used by the target context) for the
383
+ given signature.
384
+ """
385
+ return self.typing_key
386
+
387
+ def get_call_type(self, context, args, kws):
388
+ template = self.template(context)
389
+ literal_e = None
390
+ nonliteral_e = None
391
+ out = None
392
+
393
+ choice = [True, False] if template.prefer_literal else [False, True]
394
+ for uselit in choice:
395
+ if uselit:
396
+ # Try with Literal
397
+ try:
398
+ out = template.apply(args, kws)
399
+ except Exception as exc:
400
+ if not isinstance(exc, errors.NumbaError):
401
+ raise exc
402
+ if isinstance(exc, errors.ForceLiteralArg):
403
+ raise exc
404
+ literal_e = exc
405
+ out = None
406
+ else:
407
+ break
408
+ else:
409
+ # if the unliteral_args and unliteral_kws are the same as the
410
+ # literal ones, set up to not bother retrying
411
+ unliteral_args = tuple([_unlit_non_poison(a) for a in args])
412
+ unliteral_kws = {k: _unlit_non_poison(v)
413
+ for k, v in kws.items()}
414
+ skip = unliteral_args == args and kws == unliteral_kws
415
+
416
+ # If the above template application failed and the non-literal
417
+ # args are different to the literal ones, try again with
418
+ # literals rewritten as non-literals
419
+ if not skip and out is None:
420
+ try:
421
+ out = template.apply(unliteral_args, unliteral_kws)
422
+ except Exception as exc:
423
+ if isinstance(exc, errors.ForceLiteralArg):
424
+ if template.prefer_literal:
425
+ # For template that prefers literal types,
426
+ # reaching here means that the literal types
427
+ # have failed typing as well.
428
+ raise exc
429
+ nonliteral_e = exc
430
+ else:
431
+ break
432
+
433
+ if out is None and (nonliteral_e is not None or literal_e is not None):
434
+ header = "- Resolution failure for {} arguments:\n{}\n"
435
+ tmplt = _termcolor.highlight(header)
436
+ if config.DEVELOPER_MODE:
437
+ indent = ' ' * 4
438
+
439
+ def add_bt(error):
440
+ if isinstance(error, BaseException):
441
+ # if the error is an actual exception instance, trace it
442
+ bt = traceback.format_exception(type(error), error,
443
+ error.__traceback__)
444
+ else:
445
+ bt = [""]
446
+ nd2indent = '\n{}'.format(2 * indent)
447
+ errstr = _termcolor.reset(nd2indent +
448
+ nd2indent.join(_bt_as_lines(bt)))
449
+ return _termcolor.reset(errstr)
450
+ else:
451
+ add_bt = lambda X: ''
452
+
453
+ def nested_msg(literalness, e):
454
+ estr = str(e)
455
+ estr = estr if estr else (str(repr(e)) + add_bt(e))
456
+ new_e = errors.TypingError(textwrap.dedent(estr))
457
+ return tmplt.format(literalness, str(new_e))
458
+
459
+ raise errors.TypingError(nested_msg('literal', literal_e) +
460
+ nested_msg('non-literal', nonliteral_e))
461
+ return out
462
+
463
+ def get_call_signatures(self):
464
+ sigs = getattr(self.template, 'cases', [])
465
+ is_param = hasattr(self.template, 'generic')
466
+ return sigs, is_param
467
+
468
+
469
+ class MakeFunctionLiteral(Literal, Opaque):
470
+ pass
471
+
472
+
473
+ class _PickleableWeakRef(weakref.ref):
474
+ """
475
+ Allow a weakref to be pickled.
476
+
477
+ Note that if the object referred to is not kept alive elsewhere in the
478
+ pickle, the weakref will immediately expire after being constructed.
479
+ """
480
+ def __getnewargs__(self):
481
+ obj = self()
482
+ if obj is None:
483
+ raise ReferenceError("underlying object has vanished")
484
+ return (obj,)
485
+
486
+
487
+ class WeakType(Type):
488
+ """
489
+ Base class for types parametered by a mortal object, to which only
490
+ a weak reference is kept.
491
+ """
492
+
493
+ def _store_object(self, obj):
494
+ self._wr = _PickleableWeakRef(obj)
495
+
496
+ def _get_object(self):
497
+ obj = self._wr()
498
+ if obj is None:
499
+ raise ReferenceError("underlying object has vanished")
500
+ return obj
501
+
502
+ @property
503
+ def key(self):
504
+ return self._wr
505
+
506
+ def __eq__(self, other):
507
+ if type(self) is type(other):
508
+ obj = self._wr()
509
+ return obj is not None and obj is other._wr()
510
+ return NotImplemented
511
+
512
+ def __hash__(self):
513
+ return Type.__hash__(self)
514
+
515
+
516
+ class Dispatcher(WeakType, Callable, Dummy):
517
+ """
518
+ Type class for @jit-compiled functions.
519
+ """
520
+
521
+ def __init__(self, dispatcher):
522
+ self._store_object(dispatcher)
523
+ super(Dispatcher, self).__init__("type(%s)" % dispatcher)
524
+
525
+ def dump(self, tab=''):
526
+ print((f'{tab}DUMP {type(self).__name__}[code={self._code}, '
527
+ f'name={self.name}]'))
528
+ self.dispatcher.dump(tab=tab + ' ')
529
+ print(f'{tab}END DUMP')
530
+
531
+ def get_call_type(self, context, args, kws):
532
+ """
533
+ Resolve a call to this dispatcher using the given argument types.
534
+ A signature returned and it is ensured that a compiled specialization
535
+ is available for it.
536
+ """
537
+ template, pysig, args, kws = \
538
+ self.dispatcher.get_call_template(args, kws)
539
+ sig = template(context).apply(args, kws)
540
+ if sig:
541
+ sig = sig.replace(pysig=pysig)
542
+ return sig
543
+
544
+ def get_call_signatures(self):
545
+ sigs = self.dispatcher.nopython_signatures
546
+ return sigs, True
547
+
548
+ @property
549
+ def dispatcher(self):
550
+ """
551
+ A strong reference to the underlying numba.dispatcher.Dispatcher
552
+ instance.
553
+ """
554
+ return self._get_object()
555
+
556
+ def get_overload(self, sig):
557
+ """
558
+ Get the compiled overload for the given signature.
559
+ """
560
+ return self.dispatcher.get_overload(sig.args)
561
+
562
+ def get_impl_key(self, sig):
563
+ """
564
+ Get the implementation key for the given signature.
565
+ """
566
+ return self.get_overload(sig)
567
+
568
+ def unify(self, context, other):
569
+ return utils.unified_function_type((self, other), require_precise=False)
570
+
571
+ def can_convert_to(self, typingctx, other):
572
+ if isinstance(other, types.FunctionType):
573
+ try:
574
+ self.dispatcher.get_compile_result(other.signature)
575
+ except errors.NumbaError:
576
+ return None
577
+ else:
578
+ return Conversion.safe
579
+
580
+
581
+ class ObjModeDispatcher(Dispatcher):
582
+ """Dispatcher subclass that enters objectmode function.
583
+ """
584
+ pass
585
+
586
+
587
+ class ExternalFunctionPointer(BaseFunction):
588
+ """
589
+ A pointer to a native function (e.g. exported via ctypes or cffi).
590
+ *get_pointer* is a Python function taking an object
591
+ and returning the raw pointer value as an int.
592
+ """
593
+ def __init__(self, sig, get_pointer, cconv=None):
594
+ from numba.core.typing.templates import (AbstractTemplate,
595
+ make_concrete_template,
596
+ signature)
597
+ from numba.core.types import ffi_forced_object
598
+ if sig.return_type == ffi_forced_object:
599
+ msg = "Cannot return a pyobject from an external function"
600
+ raise errors.TypingError(msg)
601
+ self.sig = sig
602
+ self.requires_gil = any(a == ffi_forced_object for a in self.sig.args)
603
+ self.get_pointer = get_pointer
604
+ self.cconv = cconv
605
+ if self.requires_gil:
606
+ class GilRequiringDefn(AbstractTemplate):
607
+ key = self.sig
608
+
609
+ def generic(self, args, kws):
610
+ if kws:
611
+ msg = "does not support keyword arguments"
612
+ raise errors.TypingError(msg)
613
+ # Make ffi_forced_object a bottom type to allow any type to
614
+ # be casted to it. This is the only place that support
615
+ # ffi_forced_object.
616
+ coerced = [actual if formal == ffi_forced_object else formal
617
+ for actual, formal
618
+ in zip(args, self.key.args)]
619
+ return signature(self.key.return_type, *coerced)
620
+ template = GilRequiringDefn
621
+ else:
622
+ template = make_concrete_template("CFuncPtr", sig, [sig])
623
+ super(ExternalFunctionPointer, self).__init__(template)
624
+
625
+ @property
626
+ def key(self):
627
+ return self.sig, self.cconv, self.get_pointer
628
+
629
+
630
+ class ExternalFunction(Function):
631
+ """
632
+ A named native function (resolvable by LLVM) accepting an explicit
633
+ signature. For internal use only.
634
+ """
635
+
636
+ def __init__(self, symbol, sig):
637
+ from numba.core import typing
638
+ self.symbol = symbol
639
+ self.sig = sig
640
+ template = typing.make_concrete_template(symbol, symbol, [sig])
641
+ super(ExternalFunction, self).__init__(template)
642
+
643
+ @property
644
+ def key(self):
645
+ return self.symbol, self.sig
646
+
647
+
648
+ class NamedTupleClass(Callable, Opaque):
649
+ """
650
+ Type class for namedtuple classes.
651
+ """
652
+
653
+ def __init__(self, instance_class):
654
+ self.instance_class = instance_class
655
+ name = "class(%s)" % (instance_class)
656
+ super(NamedTupleClass, self).__init__(name)
657
+
658
+ def get_call_type(self, context, args, kws):
659
+ # Overridden by the __call__ constructor resolution in
660
+ # typing.collections
661
+ return None
662
+
663
+ def get_call_signatures(self):
664
+ return (), True
665
+
666
+ def get_impl_key(self, sig):
667
+ return type(self)
668
+
669
+ @property
670
+ def key(self):
671
+ return self.instance_class
672
+
673
+
674
+ class NumberClass(Callable, DTypeSpec, Opaque):
675
+ """
676
+ Type class for number classes (e.g. "np.float64").
677
+ """
678
+
679
+ def __init__(self, instance_type):
680
+ self.instance_type = instance_type
681
+ name = "class(%s)" % (instance_type,)
682
+ super(NumberClass, self).__init__(name)
683
+
684
+ def get_call_type(self, context, args, kws):
685
+ # Overridden by the __call__ constructor resolution in typing.builtins
686
+ return None
687
+
688
+ def get_call_signatures(self):
689
+ return (), True
690
+
691
+ def get_impl_key(self, sig):
692
+ return type(self)
693
+
694
+ @property
695
+ def key(self):
696
+ return self.instance_type
697
+
698
+ @property
699
+ def dtype(self):
700
+ return self.instance_type
701
+
702
+
703
+ _RecursiveCallOverloads = namedtuple("_RecursiveCallOverloads", "qualname,uid")
704
+
705
+
706
+ class RecursiveCall(Opaque):
707
+ """
708
+ Recursive call to a Dispatcher.
709
+ """
710
+ _overloads = None
711
+
712
+ def __init__(self, dispatcher_type):
713
+ assert isinstance(dispatcher_type, Dispatcher)
714
+ self.dispatcher_type = dispatcher_type
715
+ name = "recursive(%s)" % (dispatcher_type,)
716
+ super(RecursiveCall, self).__init__(name)
717
+ # Initializing for the first time
718
+ if self._overloads is None:
719
+ self._overloads = {}
720
+
721
+ def add_overloads(self, args, qualname, uid):
722
+ """Add an overload of the function.
723
+
724
+ Parameters
725
+ ----------
726
+ args :
727
+ argument types
728
+ qualname :
729
+ function qualifying name
730
+ uid :
731
+ unique id
732
+ """
733
+ self._overloads[args] = _RecursiveCallOverloads(qualname, uid)
734
+
735
+ def get_overloads(self, args):
736
+ """Get the qualifying name and unique id for the overload given the
737
+ argument types.
738
+ """
739
+ return self._overloads[args]
740
+
741
+ @property
742
+ def key(self):
743
+ return self.dispatcher_type
lib/python3.10/site-packages/numba/core/types/iterators.py ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .common import SimpleIterableType, SimpleIteratorType
2
+ from ..errors import TypingError
3
+
4
+
5
+ class RangeType(SimpleIterableType):
6
+
7
+ def __init__(self, dtype):
8
+ self.dtype = dtype
9
+ name = "range_state_%s" % (dtype,)
10
+ super(SimpleIterableType, self).__init__(name)
11
+ self._iterator_type = RangeIteratorType(self.dtype)
12
+
13
+ def unify(self, typingctx, other):
14
+ if isinstance(other, RangeType):
15
+ dtype = typingctx.unify_pairs(self.dtype, other.dtype)
16
+ if dtype is not None:
17
+ return RangeType(dtype)
18
+
19
+
20
+ class RangeIteratorType(SimpleIteratorType):
21
+
22
+ def __init__(self, dtype):
23
+ name = "range_iter_%s" % (dtype,)
24
+ super(SimpleIteratorType, self).__init__(name)
25
+ self._yield_type = dtype
26
+
27
+ def unify(self, typingctx, other):
28
+ if isinstance(other, RangeIteratorType):
29
+ dtype = typingctx.unify_pairs(self.yield_type, other.yield_type)
30
+ if dtype is not None:
31
+ return RangeIteratorType(dtype)
32
+
33
+
34
+ class Generator(SimpleIteratorType):
35
+ """
36
+ Type class for Numba-compiled generator objects.
37
+ """
38
+
39
+ def __init__(self, gen_func, yield_type, arg_types, state_types,
40
+ has_finalizer):
41
+ self.gen_func = gen_func
42
+ self.arg_types = tuple(arg_types)
43
+ self.state_types = tuple(state_types)
44
+ self.has_finalizer = has_finalizer
45
+ name = "%s generator(func=%s, args=%s, has_finalizer=%s)" % (
46
+ yield_type, self.gen_func, self.arg_types,
47
+ self.has_finalizer)
48
+ super(Generator, self).__init__(name, yield_type)
49
+
50
+ @property
51
+ def key(self):
52
+ return (self.gen_func, self.arg_types, self.yield_type,
53
+ self.has_finalizer, self.state_types)
54
+
55
+
56
+ class EnumerateType(SimpleIteratorType):
57
+ """
58
+ Type class for `enumerate` objects.
59
+ Type instances are parametered with the underlying source type.
60
+ """
61
+
62
+ def __init__(self, iterable_type):
63
+ from numba.core.types import Tuple, intp
64
+ self.source_type = iterable_type.iterator_type
65
+ yield_type = Tuple([intp, self.source_type.yield_type])
66
+ name = 'enumerate(%s)' % (self.source_type)
67
+ super(EnumerateType, self).__init__(name, yield_type)
68
+
69
+ @property
70
+ def key(self):
71
+ return self.source_type
72
+
73
+
74
+ class ZipType(SimpleIteratorType):
75
+ """
76
+ Type class for `zip` objects.
77
+ Type instances are parametered with the underlying source types.
78
+ """
79
+
80
+ def __init__(self, iterable_types):
81
+ from numba.core.types import Tuple
82
+ self.source_types = tuple(tp.iterator_type for tp in iterable_types)
83
+ yield_type = Tuple([tp.yield_type for tp in self.source_types])
84
+ name = 'zip(%s)' % ', '.join(str(tp) for tp in self.source_types)
85
+ super(ZipType, self).__init__(name, yield_type)
86
+
87
+ @property
88
+ def key(self):
89
+ return self.source_types
90
+
91
+
92
+ class ArrayIterator(SimpleIteratorType):
93
+ """
94
+ Type class for iterators of array and buffer objects.
95
+ """
96
+
97
+ def __init__(self, array_type):
98
+ self.array_type = array_type
99
+ name = "iter(%s)" % (self.array_type,)
100
+ nd = array_type.ndim
101
+ if nd == 0:
102
+ raise TypingError("iteration over a 0-d array")
103
+ elif nd == 1:
104
+ yield_type = array_type.dtype
105
+ else:
106
+ # iteration semantics leads to A order layout
107
+ yield_type = array_type.copy(ndim=array_type.ndim - 1, layout='A')
108
+ super(ArrayIterator, self).__init__(name, yield_type)
lib/python3.10/site-packages/numba/core/types/misc.py ADDED
@@ -0,0 +1,556 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from numba.core.types.abstract import Callable, Literal, Type, Hashable
2
+ from numba.core.types.common import (Dummy, IterableType, Opaque,
3
+ SimpleIteratorType)
4
+ from numba.core.typeconv import Conversion
5
+ from numba.core.errors import TypingError, LiteralTypingError
6
+ from numba.core.ir import UndefinedType
7
+ from numba.core.utils import get_hashable_key
8
+
9
+
10
+ class PyObject(Dummy):
11
+ """
12
+ A generic CPython object.
13
+ """
14
+
15
+ def is_precise(self):
16
+ return False
17
+
18
+
19
+ class Phantom(Dummy):
20
+ """
21
+ A type that cannot be materialized. A Phantom cannot be used as
22
+ argument or return type.
23
+ """
24
+
25
+
26
+ class Undefined(Dummy):
27
+ """
28
+ A type that is left imprecise. This is used as a temporaray placeholder
29
+ during type inference in the hope that the type can be later refined.
30
+ """
31
+
32
+ def is_precise(self):
33
+ return False
34
+
35
+
36
+ class UndefVar(Dummy):
37
+ """
38
+ A type that is created by Expr.undef to represent an undefined variable.
39
+ This type can be promoted to any other type.
40
+ This is introduced to handle Python 3.12 LOAD_FAST_AND_CLEAR.
41
+ """
42
+
43
+ def can_convert_to(self, typingctx, other):
44
+ return Conversion.promote
45
+
46
+
47
+ class RawPointer(Opaque):
48
+ """
49
+ A raw pointer without any specific meaning.
50
+ """
51
+
52
+
53
+ class StringLiteral(Literal, Dummy):
54
+
55
+ def can_convert_to(self, typingctx, other):
56
+ if isinstance(other, UnicodeType):
57
+ return Conversion.safe
58
+
59
+
60
+ Literal.ctor_map[str] = StringLiteral
61
+
62
+
63
+ def unliteral(lit_type):
64
+ """
65
+ Get base type from Literal type.
66
+ """
67
+ if hasattr(lit_type, '__unliteral__'):
68
+ return lit_type.__unliteral__()
69
+ return getattr(lit_type, 'literal_type', lit_type)
70
+
71
+
72
+ def literal(value):
73
+ """Returns a Literal instance or raise LiteralTypingError
74
+ """
75
+ ty = type(value)
76
+ if isinstance(value, Literal):
77
+ msg = "the function does not accept a Literal type; got {} ({})"
78
+ raise ValueError(msg.format(value, ty))
79
+ try:
80
+ ctor = Literal.ctor_map[ty]
81
+ except KeyError:
82
+ raise LiteralTypingError("{} cannot be used as a literal".format(ty))
83
+ else:
84
+ return ctor(value)
85
+
86
+
87
+ def maybe_literal(value):
88
+ """Get a Literal type for the value or None.
89
+ """
90
+ try:
91
+ return literal(value)
92
+ except LiteralTypingError:
93
+ return
94
+
95
+
96
+ class Omitted(Opaque):
97
+ """
98
+ An omitted function argument with a default value.
99
+ """
100
+
101
+ def __init__(self, value):
102
+ self._value = value
103
+ # Use helper function to support both hashable and non-hashable
104
+ # values. See discussion in gh #6957.
105
+ self._value_key = get_hashable_key(value)
106
+ super(Omitted, self).__init__("omitted(default=%r)" % (value,))
107
+
108
+ @property
109
+ def key(self):
110
+ return type(self._value), self._value_key
111
+
112
+ @property
113
+ def value(self):
114
+ return self._value
115
+
116
+
117
+ class VarArg(Type):
118
+ """
119
+ Special type representing a variable number of arguments at the
120
+ end of a function's signature. Only used for signature matching,
121
+ not for actual values.
122
+ """
123
+
124
+ def __init__(self, dtype):
125
+ self.dtype = dtype
126
+ super(VarArg, self).__init__("*%s" % dtype)
127
+
128
+ @property
129
+ def key(self):
130
+ return self.dtype
131
+
132
+
133
+ class Module(Dummy):
134
+ def __init__(self, pymod):
135
+ self.pymod = pymod
136
+ super(Module, self).__init__("Module(%s)" % pymod)
137
+
138
+ @property
139
+ def key(self):
140
+ return self.pymod
141
+
142
+
143
+ class MemInfoPointer(Type):
144
+ """
145
+ Pointer to a Numba "meminfo" (i.e. the information for a managed
146
+ piece of memory).
147
+ """
148
+ mutable = True
149
+
150
+ def __init__(self, dtype):
151
+ self.dtype = dtype
152
+ name = "memory-managed *%s" % dtype
153
+ super(MemInfoPointer, self).__init__(name)
154
+
155
+ @property
156
+ def key(self):
157
+ return self.dtype
158
+
159
+
160
+ class CPointer(Type):
161
+ """
162
+ Type class for pointers to other types.
163
+
164
+ Attributes
165
+ ----------
166
+ dtype : The pointee type
167
+ addrspace : int
168
+ The address space pointee belongs to.
169
+ """
170
+ mutable = True
171
+
172
+ def __init__(self, dtype, addrspace=None):
173
+ self.dtype = dtype
174
+ self.addrspace = addrspace
175
+ if addrspace is not None:
176
+ name = "%s_%s*" % (dtype, addrspace)
177
+ else:
178
+ name = "%s*" % dtype
179
+ super(CPointer, self).__init__(name)
180
+
181
+ @property
182
+ def key(self):
183
+ return self.dtype, self.addrspace
184
+
185
+
186
+ class EphemeralPointer(CPointer):
187
+ """
188
+ Type class for pointers which aren't guaranteed to last long - e.g.
189
+ stack-allocated slots. The data model serializes such pointers
190
+ by copying the data pointed to.
191
+ """
192
+
193
+
194
+ class EphemeralArray(Type):
195
+ """
196
+ Similar to EphemeralPointer, but pointing to an array of elements,
197
+ rather than a single one. The array size must be known at compile-time.
198
+ """
199
+
200
+ def __init__(self, dtype, count):
201
+ self.dtype = dtype
202
+ self.count = count
203
+ name = "*%s[%d]" % (dtype, count)
204
+ super(EphemeralArray, self).__init__(name)
205
+
206
+ @property
207
+ def key(self):
208
+ return self.dtype, self.count
209
+
210
+
211
+ class Object(Type):
212
+ # XXX unused?
213
+ mutable = True
214
+
215
+ def __init__(self, clsobj):
216
+ self.cls = clsobj
217
+ name = "Object(%s)" % clsobj.__name__
218
+ super(Object, self).__init__(name)
219
+
220
+ @property
221
+ def key(self):
222
+ return self.cls
223
+
224
+
225
+ class Optional(Type):
226
+ """
227
+ Type class for optional types, i.e. union { some type, None }
228
+ """
229
+
230
+ def __init__(self, typ):
231
+ assert not isinstance(typ, (Optional, NoneType))
232
+ typ = unliteral(typ)
233
+ self.type = typ
234
+ name = "OptionalType(%s)" % self.type
235
+ super(Optional, self).__init__(name)
236
+
237
+ @property
238
+ def key(self):
239
+ return self.type
240
+
241
+ def can_convert_to(self, typingctx, other):
242
+ if isinstance(other, Optional):
243
+ return typingctx.can_convert(self.type, other.type)
244
+ else:
245
+ conv = typingctx.can_convert(self.type, other)
246
+ if conv is not None:
247
+ return max(conv, Conversion.safe)
248
+
249
+ def can_convert_from(self, typingctx, other):
250
+ if isinstance(other, NoneType):
251
+ return Conversion.promote
252
+ elif isinstance(other, Optional):
253
+ return typingctx.can_convert(other.type, self.type)
254
+ else:
255
+ conv = typingctx.can_convert(other, self.type)
256
+ if conv is not None:
257
+ return max(conv, Conversion.promote)
258
+
259
+ def unify(self, typingctx, other):
260
+ if isinstance(other, Optional):
261
+ unified = typingctx.unify_pairs(self.type, other.type)
262
+ else:
263
+ unified = typingctx.unify_pairs(self.type, other)
264
+
265
+ if unified is not None:
266
+ if isinstance(unified, Optional):
267
+ return unified
268
+ else:
269
+ return Optional(unified)
270
+
271
+
272
+ class NoneType(Opaque):
273
+ """
274
+ The type for None.
275
+ """
276
+
277
+ def unify(self, typingctx, other):
278
+ """
279
+ Turn anything to a Optional type;
280
+ """
281
+ if isinstance(other, (Optional, NoneType)):
282
+ return other
283
+ return Optional(other)
284
+
285
+
286
+ class EllipsisType(Opaque):
287
+ """
288
+ The type for the Ellipsis singleton.
289
+ """
290
+
291
+
292
+ class ExceptionClass(Callable, Phantom):
293
+ """
294
+ The type of exception classes (not instances).
295
+ """
296
+
297
+ def __init__(self, exc_class):
298
+ assert issubclass(exc_class, BaseException)
299
+ name = "%s" % (exc_class.__name__)
300
+ self.exc_class = exc_class
301
+ super(ExceptionClass, self).__init__(name)
302
+
303
+ def get_call_type(self, context, args, kws):
304
+ return self.get_call_signatures()[0][0]
305
+
306
+ def get_call_signatures(self):
307
+ from numba.core import typing
308
+ return_type = ExceptionInstance(self.exc_class)
309
+ return [typing.signature(return_type)], False
310
+
311
+ def get_impl_key(self, sig):
312
+ return type(self)
313
+
314
+ @property
315
+ def key(self):
316
+ return self.exc_class
317
+
318
+
319
+ class ExceptionInstance(Phantom):
320
+ """
321
+ The type of exception instances. *exc_class* should be the
322
+ exception class.
323
+ """
324
+
325
+ def __init__(self, exc_class):
326
+ assert issubclass(exc_class, BaseException)
327
+ name = "%s(...)" % (exc_class.__name__,)
328
+ self.exc_class = exc_class
329
+ super(ExceptionInstance, self).__init__(name)
330
+
331
+ @property
332
+ def key(self):
333
+ return self.exc_class
334
+
335
+
336
+ class SliceType(Type):
337
+
338
+ def __init__(self, name, members):
339
+ assert members in (2, 3)
340
+ self.members = members
341
+ self.has_step = members >= 3
342
+ super(SliceType, self).__init__(name)
343
+
344
+ @property
345
+ def key(self):
346
+ return self.members
347
+
348
+
349
class SliceLiteral(Literal, SliceType):
    def __init__(self, value):
        self._literal_init(value)
        name = 'Literal[slice]({})'.format(value)
        # A literal slice has a third (step) member only when one was given.
        members = 2 if value.step is None else 3
        SliceType.__init__(self, name=name, members=members)

    @property
    def key(self):
        # Key on the slice components rather than the slice object itself.
        sl = self.literal_value
        return sl.start, sl.stop, sl.step


# Register so Literal(<slice>) resolves to SliceLiteral.
Literal.ctor_map[slice] = SliceLiteral
363
+
364
+
365
class ClassInstanceType(Type):
    """
    The type of a jitted class *instance*. It will be the return-type
    of the constructor of the class.
    """
    mutable = True
    name_prefix = "instance"

    def __init__(self, class_type):
        # The ClassType this is an instance of; all metadata is delegated
        # to it (see the properties below).
        self.class_type = class_type
        name = "{0}.{1}".format(self.name_prefix, self.class_type.name)
        super(ClassInstanceType, self).__init__(name)

    def get_data_type(self):
        # The instance's data fields live in a separate ClassDataType.
        return ClassDataType(self)

    def get_reference_type(self):
        return self

    @property
    def key(self):
        return self.class_type.key

    @property
    def classname(self):
        return self.class_type.class_name

    @property
    def jit_props(self):
        return self.class_type.jit_props

    @property
    def jit_static_methods(self):
        return self.class_type.jit_static_methods

    @property
    def jit_methods(self):
        return self.class_type.jit_methods

    @property
    def struct(self):
        return self.class_type.struct

    @property
    def methods(self):
        return self.class_type.methods

    @property
    def static_methods(self):
        return self.class_type.static_methods
415
+
416
+
417
class ClassType(Callable, Opaque):
    """
    The type of the jitted class (not instance). When the type of a class
    is called, its constructor is invoked.
    """
    mutable = True
    name_prefix = "jitclass"
    instance_type_class = ClassInstanceType

    def __init__(self, class_def, ctor_template_cls, struct, jit_methods,
                 jit_props, jit_static_methods):
        self.class_name = class_def.__name__
        self.class_doc = class_def.__doc__
        self._ctor_template_class = ctor_template_cls
        self.jit_methods = jit_methods
        self.jit_props = jit_props
        self.jit_static_methods = jit_static_methods
        self.struct = struct
        # The name encodes both the field layout and id(self) so distinct
        # jitclass definitions never collide.
        fielddesc = ','.join("{0}:{1}".format(k, v) for k, v in struct.items())
        name = "{0}.{1}#{2:x}<{3}>".format(self.name_prefix, self.class_name,
                                           id(self), fielddesc)
        super(ClassType, self).__init__(name)

    def get_call_type(self, context, args, kws):
        # Calling the class types as a constructor call.
        return self.ctor_template(context).apply(args, kws)

    def get_call_signatures(self):
        return (), True

    def get_impl_key(self, sig):
        return type(self)

    @property
    def methods(self):
        # Plain Python functions backing the jitted methods.
        return {k: v.py_func for k, v in self.jit_methods.items()}

    @property
    def static_methods(self):
        return {k: v.py_func for k, v in self.jit_static_methods.items()}

    @property
    def instance_type(self):
        return ClassInstanceType(self)

    @property
    def ctor_template(self):
        return self._specialize_template(self._ctor_template_class)

    def _specialize_template(self, basecls):
        # Derive a template subclass keyed to this particular type instance.
        return type(basecls.__name__, (basecls,), dict(key=self))
467
+
468
+
469
class DeferredType(Type):
    """
    Represents a type that will be defined later. It must be defined
    before it is materialized (used in the compiler). Once defined, it
    behaves exactly as the type it is defining.
    """

    def __init__(self):
        self._define = None
        super(DeferredType, self).__init__(
            "{0}#{1}".format(type(self).__name__, id(self)))

    def get(self):
        """Return the defined type, raising if none was set yet."""
        defined = self._define
        if defined is None:
            raise RuntimeError("deferred type not defined")
        return defined

    def define(self, typ):
        """Set the deferred type exactly once; *typ* must be a Type."""
        if self._define is not None:
            raise TypeError("deferred type already defined")
        if not isinstance(typ, Type):
            raise TypeError("arg is not a Type; got: {0}".format(type(typ)))
        self._define = typ

    def unify(self, typingctx, other):
        # Unify through the underlying (already defined) type.
        return typingctx.unify_pairs(self.get(), other)
495
+
496
+
497
class ClassDataType(Type):
    """
    Internal only.
    Represents the data of the instance. The representation of
    ClassInstanceType contains a pointer to a ClassDataType which represents
    a C structure that contains all the data fields of the class instance.
    """

    def __init__(self, classtyp):
        self.class_type = classtyp
        super(ClassDataType, self).__init__(
            "data.{0}".format(classtyp.name))
509
+
510
+
511
class ContextManager(Callable, Phantom):
    """
    An overly-simple ContextManager type that cannot be materialized.
    """

    def __init__(self, cm):
        # *cm* is the context-manager object being typed.
        self.cm = cm
        super(ContextManager, self).__init__("ContextManager({})".format(cm))

    def get_call_signatures(self):
        if not self.cm.is_callable:
            msg = "contextmanager {} is not callable".format(self.cm)
            raise TypingError(msg)

        return (), False

    def get_call_type(self, context, args, kws):
        from numba.core import typing

        if not self.cm.is_callable:
            msg = "contextmanager {} is not callable".format(self.cm)
            raise TypingError(msg)

        # Keyword arguments are folded into positional order, sorted by name.
        posargs = list(args) + [v for k, v in sorted(kws.items())]
        return typing.signature(self, *posargs)

    def get_impl_key(self, sig):
        return type(self)
539
+
540
+
541
class UnicodeType(IterableType, Hashable):
    # Iterable, hashable unicode string type; iterating it yields a
    # UnicodeIteratorType over this type.

    def __init__(self, name):
        super(UnicodeType, self).__init__(name)

    @property
    def iterator_type(self):
        return UnicodeIteratorType(self)
549
+
550
+
551
class UnicodeIteratorType(SimpleIteratorType):

    def __init__(self, dtype):
        # *dtype* is the unicode type being iterated; kept as ``.data``.
        name = "iter_unicode"
        self.data = dtype
        super(UnicodeIteratorType, self).__init__(name, dtype)
lib/python3.10/site-packages/numba/core/types/new_scalars/__init__.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from numba.core.types.new_scalars.scalars import (
2
+ Integer, IntegerLiteral, Boolean, BooleanLiteral, Float, Complex,
3
+ parse_integer_bitwidth, parse_integer_signed,
4
+ _NPDatetimeBase, NPTimedelta, NPDatetime, EnumClass, IntEnumClass,
5
+ EnumMember, IntEnumMember
6
+ )
7
+ from numba.core.types.new_scalars.python_types import (
8
+ PythonBoolean, PythonInteger, PythonFloat, PythonComplex,
9
+ PythonBooleanLiteral, PythonIntegerLiteral
10
+ )
11
+ from numba.core.types.new_scalars.machine_types import (
12
+ MachineBoolean, MachineInteger, MachineFloat, MachineComplex,
13
+ MachineBooleanLiteral, MachineIntegerLiteral
14
+ )
15
+ from numba.core.types.new_scalars.numpy_types import (
16
+ NumPyBoolean, NumPyInteger, NumPyFloat, NumPyComplex,
17
+ NumPyBooleanLiteral, NumPyIntegerLiteral
18
+ )
lib/python3.10/site-packages/numba/core/types/new_scalars/machine_types.py ADDED
@@ -0,0 +1,119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Type definitions for machine types.
3
+ """
4
+
5
+ from numba.core.types.new_scalars.scalars \
6
+ import (Integer, IntegerLiteral, Boolean,
7
+ BooleanLiteral, Float, Complex,
8
+ parse_integer_bitwidth, parse_integer_signed)
9
+ from functools import total_ordering
10
+ from numba.core.typeconv import Conversion
11
+
12
+
13
@total_ordering
class MachineInteger(Integer):
    def __init__(self, name, bitwidth=None, signed=None):
        super(MachineInteger, self).__init__(name)
        # Bitwidth and signedness default to whatever the name encodes
        # (e.g. 'int64' / 'uint32').
        if bitwidth is None:
            bitwidth = parse_integer_bitwidth(name)
        if signed is None:
            signed = parse_integer_signed(name)
        self.bitwidth = bitwidth
        self.signed = signed

    @classmethod
    def from_bitwidth(cls, bitwidth, signed=True):
        # Alternate constructor from an explicit bitwidth.
        name = ('int%d' if signed else 'uint%d') % bitwidth
        return cls(name)

    def __lt__(self, other):
        # Orderable only against same-class, same-signedness integers;
        # total_ordering derives the remaining comparisons.
        if self.__class__ is not other.__class__:
            return NotImplemented
        if self.signed != other.signed:
            return NotImplemented
        return self.bitwidth < other.bitwidth

    @property
    def maxval(self):
        """
        The maximum value representable by this type.
        """
        if self.signed:
            return (1 << (self.bitwidth - 1)) - 1
        else:
            return (1 << self.bitwidth) - 1

    @property
    def minval(self):
        """
        The minimal value representable by this type.
        """
        if self.signed:
            return -(1 << (self.bitwidth - 1))
        else:
            return 0
55
+
56
+
57
class MachineIntegerLiteral(IntegerLiteral, MachineInteger):
    def __init__(self, value):
        self._literal_init(value)
        name = 'Literal[machine_int]({})'.format(value)
        # Inherit bitwidth/signedness from the literal's underlying type.
        basetype = self.literal_type
        MachineInteger.__init__(self,
                                name=name,
                                bitwidth=basetype.bitwidth,
                                signed=basetype.signed,)

    def can_convert_to(self, typingctx, other):
        # Convert like the underlying type, clamped to at least
        # Conversion.promote via max(); returns None when not convertible.
        conv = typingctx.can_convert(self.literal_type, other)
        if conv is not None:
            return max(conv, Conversion.promote)
71
+
72
+
73
class MachineBoolean(Boolean):
    # Machine-level boolean; no extra state beyond the Boolean base.
    pass
75
+
76
+
77
class MachineBooleanLiteral(BooleanLiteral, MachineBoolean):

    def __init__(self, value):
        self._literal_init(value)
        name = 'Literal[machine_bool]({})'.format(value)
        MachineBoolean.__init__(self,
                                name=name)

    def can_convert_to(self, typingctx, other):
        # Convert like the underlying type, clamped to at least
        # Conversion.promote via max(); returns None when not convertible.
        conv = typingctx.can_convert(self.literal_type, other)
        if conv is not None:
            return max(conv, Conversion.promote)
89
+
90
+
91
@total_ordering
class MachineFloat(Float):
    def __init__(self, *args, **kws):
        super(MachineFloat, self).__init__(*args, **kws)
        # Determine bitwidth
        # NOTE(review): name[8:] implies names shaped like 'c_float_64'
        # (digits starting at index 8) — confirm the naming convention.
        assert self.name.startswith('c_float')
        bitwidth = int(self.name[8:])
        self.bitwidth = bitwidth

    def __lt__(self, other):
        if self.__class__ is not other.__class__:
            return NotImplemented
        return self.bitwidth < other.bitwidth
104
+
105
+
106
@total_ordering
class MachineComplex(Complex):
    def __init__(self, name, underlying_float, **kwargs):
        super(MachineComplex, self).__init__(name, **kwargs)
        # The float type of the real/imaginary components.
        self.underlying_float = underlying_float
        # Determine bitwidth
        # NOTE(review): name[10:] implies names shaped like 'c_complex_128'
        # (digits starting at index 10) — confirm the naming convention.
        assert self.name.startswith('c_complex')
        bitwidth = int(self.name[10:])
        self.bitwidth = bitwidth

    def __lt__(self, other):
        if self.__class__ is not other.__class__:
            return NotImplemented
        return self.bitwidth < other.bitwidth
lib/python3.10/site-packages/numba/core/types/new_scalars/numpy_types.py ADDED
@@ -0,0 +1,142 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Type definitions for NumPy types.
3
+ """
4
+
5
+ import numpy as np
6
+
7
+ from numba.core.types.abstract import Literal
8
+ from numba.core.types.new_scalars.scalars \
9
+ import (Integer, IntegerLiteral, Boolean,
10
+ BooleanLiteral, Float, Complex,
11
+ parse_integer_bitwidth, parse_integer_signed)
12
+ from functools import total_ordering
13
+ from numba.core.typeconv import Conversion
14
+
15
+
16
@total_ordering
class NumPyInteger(Integer):
    def __init__(self, name, bitwidth=None, signed=None):
        super(NumPyInteger, self).__init__(name)
        # Bitwidth and signedness default to what the name encodes.
        if bitwidth is None:
            bitwidth = parse_integer_bitwidth(name)
        if signed is None:
            signed = parse_integer_signed(name)
        self.bitwidth = bitwidth
        self.signed = signed

    @classmethod
    def from_bitwidth(cls, bitwidth, signed=True):
        name = ('np_int%d' if signed else 'np_uint%d') % bitwidth
        return cls(name)

    def cast_python_value(self, value):
        # Coerce to the matching NumPy scalar type, e.g. np.uint32(value).
        sign_char = "" if self.signed else "u"
        return getattr(
            np,
            sign_char + "int" + str(self.bitwidth)
        )(value)

    def __lt__(self, other):
        if self.__class__ is not other.__class__:
            return NotImplemented
        if self.signed != other.signed:
            return NotImplemented
        return self.bitwidth < other.bitwidth

    @property
    def maxval(self):
        """
        The maximum value representable by this type.
        """
        if self.signed:
            return (1 << (self.bitwidth - 1)) - 1
        else:
            return (1 << self.bitwidth) - 1

    @property
    def minval(self):
        """
        The minimal value representable by this type.
        """
        if self.signed:
            return -(1 << (self.bitwidth - 1))
        else:
            return 0
65
+
66
+
67
class NumPyIntegerLiteral(IntegerLiteral, NumPyInteger):
    """Literal type carrying a concrete NumPy integer value.

    Fix: also inherit from NumPyInteger, consistent with
    MachineIntegerLiteral(IntegerLiteral, MachineInteger) and
    PythonIntegerLiteral(IntegerLiteral, PythonInteger).  The original
    called ``NumPyInteger.__init__`` without inheriting from it, so
    instances failed ``isinstance(x, NumPyInteger)`` checks.
    """

    def __init__(self, value):
        self._literal_init(value)
        name = 'Literal[int]({})'.format(value)
        # Inherit bitwidth/signedness from the literal's underlying type.
        basetype = self.literal_type
        NumPyInteger.__init__(self,
                              name=name,
                              bitwidth=basetype.bitwidth,
                              signed=basetype.signed,)

    def can_convert_to(self, typingctx, other):
        # Convert like the underlying type, clamped to at least
        # Conversion.promote via max(); returns None when not convertible.
        conv = typingctx.can_convert(self.literal_type, other)
        if conv is not None:
            return max(conv, Conversion.promote)


# Register so Literal(<np.integer value>) resolves to this class.
Literal.ctor_map[np.integer] = NumPyIntegerLiteral
84
+
85
+
86
class NumPyBoolean(Boolean):
    def cast_python_value(self, value):
        # Coerce arbitrary Python values to a NumPy bool scalar.
        return np.bool_(value)
89
+
90
+
91
class NumPyBooleanLiteral(BooleanLiteral, NumPyBoolean):

    def __init__(self, value):
        self._literal_init(value)
        name = 'Literal[np.bool_]({})'.format(value)
        NumPyBoolean.__init__(self,
                              name=name)

    def can_convert_to(self, typingctx, other):
        # Convert like the underlying type, clamped to at least
        # Conversion.promote via max(); returns None when not convertible.
        conv = typingctx.can_convert(self.literal_type, other)
        if conv is not None:
            return max(conv, Conversion.promote)


# Register so Literal(<np.bool_ value>) resolves to this class.
Literal.ctor_map[np.bool_] = NumPyBooleanLiteral
106
+
107
+
108
@total_ordering
class NumPyFloat(Float):
    def __init__(self, *args, **kws):
        super(NumPyFloat, self).__init__(*args, **kws)
        # Determine bitwidth
        # 'np_float' is 8 characters, so name[8:] is the digit suffix.
        assert self.name.startswith('np_float')
        bitwidth = int(self.name[8:])
        self.bitwidth = bitwidth

    def cast_python_value(self, value):
        # Coerce to the matching NumPy float scalar, e.g. np.float64(value).
        return getattr(np, "float" + str(self.bitwidth))(value)

    def __lt__(self, other):
        if self.__class__ is not other.__class__:
            return NotImplemented
        return self.bitwidth < other.bitwidth
124
+
125
+
126
@total_ordering
class NumPyComplex(Complex):
    def __init__(self, name, underlying_float, **kwargs):
        super(NumPyComplex, self).__init__(name, **kwargs)
        # The float type of the real/imaginary components.
        self.underlying_float = underlying_float
        # Determine bitwidth
        # 'np_complex' is 10 characters, so name[10:] is the digit suffix.
        assert self.name.startswith('np_complex')
        bitwidth = int(self.name[10:])
        self.bitwidth = bitwidth

    def cast_python_value(self, value):
        return getattr(np, "complex" + str(self.bitwidth))(value)

    def __lt__(self, other):
        if self.__class__ is not other.__class__:
            return NotImplemented
        return self.bitwidth < other.bitwidth
lib/python3.10/site-packages/numba/core/types/new_scalars/python_types.py ADDED
@@ -0,0 +1,130 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Type definitions for Python types.
3
+ """
4
+
5
+ from numba.core.types.abstract import Literal
6
+ from numba.core.types.new_scalars.scalars \
7
+ import (Integer, IntegerLiteral, Boolean,
8
+ BooleanLiteral, Float, Complex,
9
+ parse_integer_signed)
10
+ from functools import total_ordering
11
+ from numba.core.typeconv import Conversion
12
+
13
+
14
@total_ordering
class PythonInteger(Integer):
    def __init__(self, name, bitwidth=None, signed=None):
        super(PythonInteger, self).__init__(name)
        # Python ints are modelled with a fixed 64-bit default width.
        if bitwidth is None:
            bitwidth = 64
        if signed is None:
            signed = parse_integer_signed(name)
        self.bitwidth = bitwidth
        self.signed = signed

    def cast_python_value(self, value):
        return int(value)

    def __lt__(self, other):
        if self.__class__ is not other.__class__:
            return NotImplemented
        if self.signed != other.signed:
            return NotImplemented
        return self.bitwidth < other.bitwidth

    @property
    def maxval(self):
        """
        The maximum value representable by this type.
        """
        if self.signed:
            return (1 << (self.bitwidth - 1)) - 1
        else:
            return (1 << self.bitwidth) - 1

    @property
    def minval(self):
        """
        The minimal value representable by this type.
        """
        if self.signed:
            return -(1 << (self.bitwidth - 1))
        else:
            return 0
54
+
55
+
56
class PythonIntegerLiteral(IntegerLiteral, PythonInteger):
    def __init__(self, value):
        self._literal_init(value)
        name = 'Literal[int]({})'.format(value)
        # Inherit bitwidth/signedness from the literal's underlying type.
        basetype = self.literal_type
        PythonInteger.__init__(self,
                               name=name,
                               bitwidth=basetype.bitwidth,
                               signed=basetype.signed,)

    def can_convert_to(self, typingctx, other):
        # Convert like the underlying type, clamped to at least
        # Conversion.promote via max(); returns None when not convertible.
        conv = typingctx.can_convert(self.literal_type, other)
        if conv is not None:
            return max(conv, Conversion.promote)


# Register so Literal(<int value>) resolves to this class.
Literal.ctor_map[int] = PythonIntegerLiteral
73
+
74
+
75
class PythonBoolean(Boolean):
    def cast_python_value(self, value):
        # Coerce arbitrary Python values to a plain bool.
        return bool(value)
78
+
79
+
80
class PythonBooleanLiteral(BooleanLiteral, PythonBoolean):

    def __init__(self, value):
        self._literal_init(value)
        name = 'Literal[bool]({})'.format(value)
        PythonBoolean.__init__(self, name=name)

    def can_convert_to(self, typingctx, other):
        # Convert like the underlying type, clamped to at least
        # Conversion.promote via max(); returns None when not convertible.
        conv = typingctx.can_convert(self.literal_type, other)
        if conv is not None:
            return max(conv, Conversion.promote)


# Register so Literal(<bool value>) resolves to this class.
Literal.ctor_map[bool] = PythonBooleanLiteral
94
+
95
+
96
@total_ordering
class PythonFloat(Float):
    def __init__(self, *args, **kws):
        super(PythonFloat, self).__init__(*args, **kws)
        # Determine bitwidth
        # Python floats are always modelled as 64-bit.
        assert self.name.startswith('py_float')
        bitwidth = 64
        self.bitwidth = bitwidth

    def cast_python_value(self, value):
        return float(value)

    def __lt__(self, other):
        if self.__class__ is not other.__class__:
            return NotImplemented
        return self.bitwidth < other.bitwidth
112
+
113
+
114
@total_ordering
class PythonComplex(Complex):
    def __init__(self, name, underlying_float, **kwargs):
        super(PythonComplex, self).__init__(name, **kwargs)
        # The float type of the real/imaginary components.
        self.underlying_float = underlying_float
        # Determine bitwidth
        # Python complex is modelled as two 64-bit floats => 128 bits total.
        assert self.name.startswith('py_complex')
        bitwidth = 128
        self.bitwidth = bitwidth

    def cast_python_value(self, value):
        return complex(value)

    def __lt__(self, other):
        if self.__class__ is not other.__class__:
            return NotImplemented
        return self.bitwidth < other.bitwidth
lib/python3.10/site-packages/numba/core/types/new_scalars/scalars.py ADDED
@@ -0,0 +1,161 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import enum
2
+ import re
3
+ import numpy as np
4
+
5
+ from numba.core.types.abstract import Dummy, Hashable, Literal, Number, Type
6
+ from functools import total_ordering, cached_property
7
+ from numba.core import utils
8
+ from numba.core.typeconv import Conversion
9
+ from numba.np import npdatetime_helpers
10
+
11
+
12
class Boolean(Hashable):
    # Abstract base for boolean scalar types.
    pass
14
+
15
def parse_integer_bitwidth(name):
    """Extract the integer bitwidth encoded in a type name.

    The last run of decimal digits in *name* (e.g. 'int64', 'np_uint8')
    is taken as the bitwidth.
    """
    digit_runs = re.findall(r'\d+', name)
    return int(digit_runs[-1])
18
+
19
+
20
def parse_integer_signed(name):
    """Return True when *name* denotes a signed integer type.

    Signedness is encoded purely in the name prefix: signed types start
    with 'int', anything else (e.g. 'uint64') is treated as unsigned.
    """
    return name.startswith('int')
23
+
24
+
25
class Integer(Number):
    # Abstract base for integer scalar types.
    pass
27
+
28
+
29
class IntegerLiteral(Literal, Integer):
    # Base for value-carrying (literal) integer types.
    pass
31
+
32
+
33
class BooleanLiteral(Literal, Boolean):
    # Base for value-carrying (literal) boolean types.
    pass
35
+
36
+
37
class Float(Number):
    # Abstract base for floating-point scalar types.
    pass
39
+
40
+
41
class Complex(Number):
    # Abstract base for complex scalar types.
    pass
43
+
44
+
45
class _NPDatetimeBase(Type):
    """
    Common base class for np.datetime64 and np.timedelta64.

    *unit* is a NumPy datetime unit string (a key of
    npdatetime_helpers.DATETIME_UNITS); subclasses set ``type_name``.
    """

    def __init__(self, unit, *args, **kws):
        name = '%s[%s]' % (self.type_name, unit)
        self.unit = unit
        # Numeric code of the unit; also drives the ordering in __lt__.
        self.unit_code = npdatetime_helpers.DATETIME_UNITS[self.unit]
        super(_NPDatetimeBase, self).__init__(name, *args, **kws)

    def __lt__(self, other):
        if self.__class__ is not other.__class__:
            return NotImplemented
        # A coarser-grained unit is "smaller", i.e. less precise values
        # can be represented (but the magnitude of representable values is
        # also greater...).
        return self.unit_code < other.unit_code

    def cast_python_value(self, value):
        # Build the matching NumPy scalar, passing the unit only when set.
        cls = getattr(np, self.type_name)
        if self.unit:
            return cls(value, self.unit)
        else:
            return cls(value)
70
+
71
+
72
@total_ordering
class NPTimedelta(_NPDatetimeBase):
    # np.timedelta64 values; ordering comes from _NPDatetimeBase.__lt__
    # via total_ordering.
    type_name = 'timedelta64'
75
+
76
@total_ordering
class NPDatetime(_NPDatetimeBase):
    # np.datetime64 values; ordering comes from _NPDatetimeBase.__lt__
    # via total_ordering.
    type_name = 'datetime64'
79
+
80
+
81
class EnumClass(Dummy):
    """
    Type class for Enum classes.
    """
    basename = "Enum class"

    def __init__(self, cls, dtype):
        assert isinstance(cls, type)
        assert isinstance(dtype, Type)
        # *cls* is the Python Enum class, *dtype* the type of its values.
        self.instance_class = cls
        self.dtype = dtype
        name = "%s<%s>(%s)" % (self.basename, self.dtype, self.instance_class.__name__)
        super(EnumClass, self).__init__(name)

    @property
    def key(self):
        return self.instance_class, self.dtype

    @cached_property
    def member_type(self):
        """
        The type of this class' members.
        """
        return EnumMember(self.instance_class, self.dtype)
105
+
106
+
107
class IntEnumClass(EnumClass):
    """
    Type class for IntEnum classes.
    """
    basename = "IntEnum class"

    @cached_property
    def member_type(self):
        """
        The type of this class' members.
        """
        # Same as EnumClass.member_type but yields IntEnumMember.
        return IntEnumMember(self.instance_class, self.dtype)
119
+
120
+
121
class EnumMember(Type):
    """
    Type class for Enum members.
    """
    basename = "Enum"
    # Overridden in subclasses to pair members with their class type.
    class_type_class = EnumClass

    def __init__(self, cls, dtype):
        assert isinstance(cls, type)
        assert isinstance(dtype, Type)
        self.instance_class = cls
        self.dtype = dtype
        name = "%s<%s>(%s)" % (self.basename, self.dtype, self.instance_class.__name__)
        super(EnumMember, self).__init__(name)

    @property
    def key(self):
        return self.instance_class, self.dtype

    @property
    def class_type(self):
        """
        The type of this member's class.
        """
        return self.class_type_class(self.instance_class, self.dtype)
146
+
147
+
148
class IntEnumMember(EnumMember):
    """
    Type class for IntEnum members.
    """
    basename = "IntEnum"
    class_type_class = IntEnumClass

    def can_convert_to(self, typingctx, other):
        """
        Convert IntEnum members to plain integers.
        """
        if issubclass(self.instance_class, enum.IntEnum):
            conv = typingctx.can_convert(self.dtype, other)
            # NOTE(review): if can_convert returns None this max() raises —
            # confirm callers only reach here with convertible dtypes.
            return max(conv, Conversion.safe)
lib/python3.10/site-packages/numba/core/types/npytypes.py ADDED
@@ -0,0 +1,649 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import collections
2
+ import warnings
3
+ from functools import cached_property
4
+
5
+ from llvmlite import ir
6
+
7
+ from .abstract import DTypeSpec, IteratorType, MutableSequence, Number, Type
8
+ from .common import Buffer, Opaque, SimpleIteratorType
9
+ from numba.core.typeconv import Conversion
10
+ from numba.core import utils
11
+ from .misc import UnicodeType
12
+ from .containers import Bytes
13
+ import numpy as np
14
+
15
class CharSeq(Type):
    """
    A fixed-length 8-bit character sequence.
    """
    mutable = True

    def __init__(self, count):
        # Number of 8-bit characters in the sequence.
        self.count = count
        name = "[char x %d]" % count
        super(CharSeq, self).__init__(name)

    @property
    def key(self):
        return self.count

    def can_convert_from(self, typingctx, other):
        # bytes objects can be stored into a char sequence; anything else
        # is implicitly rejected (returns None).
        if isinstance(other, Bytes):
            return Conversion.safe
33
+
34
+
35
class UnicodeCharSeq(Type):
    """
    A fixed-length unicode character sequence.
    """
    mutable = True

    def __init__(self, count):
        # Number of unicode characters in the sequence.
        self.count = count
        name = "[unichr x %d]" % count
        super(UnicodeCharSeq, self).__init__(name)

    @property
    def key(self):
        return self.count

    def can_convert_to(self, typingctx, other):
        # Any unicode char sequence may convert to another one,
        # regardless of length.
        if isinstance(other, UnicodeCharSeq):
            return Conversion.safe

    def can_convert_from(self, typingctx, other):
        if isinstance(other, UnicodeType):
            # Assuming that unicode_type itemsize is not greater than
            # numpy.dtype('U1').itemsize that UnicodeCharSeq is based
            # on.
            return Conversion.safe

    def __repr__(self):
        return f"UnicodeCharSeq({self.count})"
63
+
64
+
65
# Descriptor of one Record field: its Numba type, byte offset within the
# record, optional explicit alignment, and optional NumPy title.
_RecordField = collections.namedtuple(
    '_RecordField',
    'type,offset,alignment,title',
)
69
+
70
+
71
class Record(Type):
    """
    A Record datatype can be mapped to a NumPy structured dtype.
    A record is very flexible since it is laid out as a list of bytes.
    Fields can be mapped to arbitrary points inside it, even if they overlap.

    *fields* is a list of `(name:str, data:dict)`.
    Where `data` is `{ type: Type, offset: int }`
    *size* is an int; the record size
    *aligned* is a boolean; whether the record is ABI aligned.
    """
    mutable = True

    @classmethod
    def make_c_struct(cls, name_types):
        """Construct a Record type from a list of (name:str, type:Types).
        The layout of the structure will follow C.

        Note: only scalar types are supported currently.
        """
        from numba.core.registry import cpu_target

        ctx = cpu_target.target_context
        offset = 0
        fields = []
        lltypes = []
        for k, ty in name_types:
            if not isinstance(ty, (Number, NestedArray)):
                msg = "Only Number and NestedArray types are supported, found: {}. "
                raise TypeError(msg.format(ty))
            if isinstance(ty, NestedArray):
                datatype = ctx.data_model_manager[ty].as_storage_type()
            else:
                datatype = ctx.get_data_type(ty)
            lltypes.append(datatype)
            size = ctx.get_abi_sizeof(datatype)
            align = ctx.get_abi_alignment(datatype)
            # align
            # Insert padding so each field starts on its ABI alignment,
            # mirroring C struct layout.
            misaligned = offset % align
            if misaligned:
                offset += align - misaligned
            fields.append((k, {
                'type': ty, 'offset': offset, 'alignment': align,
            }))
            offset += size
        # Adjust sizeof structure
        abi_size = ctx.get_abi_sizeof(ir.LiteralStructType(lltypes))
        return Record(fields, size=abi_size, aligned=True)

    def __init__(self, fields, size, aligned):
        fields = self._normalize_fields(fields)
        self.fields = dict(fields)
        self.size = size
        self.aligned = aligned

        # Create description
        # The type name fully encodes the layout so it can serve as the
        # equality key (see `key`).
        descbuf = []
        fmt = "{}[type={};offset={}{}]"
        for k, infos in fields:
            extra = ""
            if infos.alignment is not None:
                extra += ';alignment={}'.format(infos.alignment)
            elif infos.title is not None:
                extra += ';title={}'.format(infos.title)
            descbuf.append(fmt.format(k, infos.type, infos.offset, extra))

        desc = ','.join(descbuf)
        name = 'Record({};{};{})'.format(desc, self.size, self.aligned)
        super(Record, self).__init__(name)

        self.bitwidth = self.dtype.itemsize * 8

    @classmethod
    def _normalize_fields(cls, fields):
        """
        fields:
            [name: str,
             value: {
                 type: Type,
                 offset: int,
                 [ alignment: int ],
                 [ title : str],
             }]
        """
        # Sort by (offset, name) and wrap each entry in a _RecordField.
        res = []
        for name, infos in sorted(fields, key=lambda x: (x[1]['offset'], x[0])):
            fd = _RecordField(
                type=infos['type'],
                offset=infos['offset'],
                alignment=infos.get('alignment'),
                title=infos.get('title'),
            )
            res.append((name, fd))
        return res

    @property
    def key(self):
        # Numpy dtype equality doesn't always succeed, use the name instead
        # (https://github.com/numpy/numpy/issues/5715)
        return self.name

    @property
    def mangling_args(self):
        # NOTE(review): self._code is presumably assigned by the Type base
        # class — confirm.
        return self.__class__.__name__, (self._code,)

    def __len__(self):
        """Returns the number of fields
        """
        return len(self.fields)

    def offset(self, key):
        """Get the byte offset of a field from the start of the structure.
        """
        return self.fields[key].offset

    def typeof(self, key):
        """Get the type of a field.
        """
        return self.fields[key].type

    def alignof(self, key):
        """Get the specified alignment of the field.

        Since field alignment is optional, this may return None.
        """
        return self.fields[key].alignment

    def has_titles(self):
        """Returns True the record uses titles.
        """
        return any(fd.title is not None for fd in self.fields.values())

    def is_title(self, key):
        """Returns True if the field named *key* is a title.
        """
        return self.fields[key].title == key

    @property
    def members(self):
        """An ordered list of (name, type) for the fields.
        """
        ordered = sorted(self.fields.items(), key=lambda x: x[1].offset)
        return [(k, v.type) for k, v in ordered]

    @property
    def dtype(self):
        from numba.np.numpy_support import as_struct_dtype

        return as_struct_dtype(self)

    def can_convert_to(self, typingctx, other):
        """
        Convert this Record to the *other*.

        This method only implements width subtyping for records.
        """
        from numba.core.errors import NumbaExperimentalFeatureWarning

        if isinstance(other, Record):
            # *other* must be a prefix of this record's fields.
            if len(other.fields) > len(self.fields):
                return
            for other_fd, self_fd in zip(other.fields.items(),
                                         self.fields.items()):
                if not other_fd == self_fd:
                    return
            warnings.warn(f"{self} has been considered a subtype of {other} "
                          f" This is an experimental feature.",
                          category=NumbaExperimentalFeatureWarning)
            return Conversion.safe

    def __repr__(self):
        fields = [f"('{f_name}', " +
                  f"{{'type': {repr(f_info.type)}, " +
                  f"'offset': {f_info.offset}, " +
                  f"'alignment': {f_info.alignment}, " +
                  f"'title': {f_info.title}, " +
                  f"}}" +
                  ")"
                  for f_name, f_info in self.fields.items()]
        fields = "[" + ", ".join(fields) + "]"
        return f"Record({fields}, {self.size}, {self.aligned})"
253
+
254
+ class DType(DTypeSpec, Opaque):
255
+ """
256
+ Type class associated with the `np.dtype`.
257
+
258
+ i.e. :code:`assert type(np.dtype('int32')) == np.dtype`
259
+
260
+ np.dtype('int32')
261
+ """
262
+
263
+ def __init__(self, dtype):
264
+ assert isinstance(dtype, Type)
265
+ self._dtype = dtype
266
+ name = "dtype(%s)" % (dtype,)
267
+ super(DTypeSpec, self).__init__(name)
268
+
269
+ @property
270
+ def key(self):
271
+ return self.dtype
272
+
273
+ @property
274
+ def dtype(self):
275
+ return self._dtype
276
+
277
+ def __getitem__(self, arg):
278
+ res = super(DType, self).__getitem__(arg)
279
+ return res.copy(dtype=self.dtype)
280
+
281
+
282
+ class NumpyFlatType(SimpleIteratorType, MutableSequence):
283
+ """
284
+ Type class for `ndarray.flat()` objects.
285
+ """
286
+
287
+ def __init__(self, arrty):
288
+ self.array_type = arrty
289
+ yield_type = arrty.dtype
290
+ self.dtype = yield_type
291
+ name = "array.flat({arrayty})".format(arrayty=arrty)
292
+ super(NumpyFlatType, self).__init__(name, yield_type)
293
+
294
+ @property
295
+ def key(self):
296
+ return self.array_type
297
+
298
+
299
+ class NumpyNdEnumerateType(SimpleIteratorType):
300
+ """
301
+ Type class for `np.ndenumerate()` objects.
302
+ """
303
+
304
+ def __init__(self, arrty):
305
+ from . import Tuple, UniTuple, intp
306
+ self.array_type = arrty
307
+ yield_type = Tuple((UniTuple(intp, arrty.ndim), arrty.dtype))
308
+ name = "ndenumerate({arrayty})".format(arrayty=arrty)
309
+ super(NumpyNdEnumerateType, self).__init__(name, yield_type)
310
+
311
+ @property
312
+ def key(self):
313
+ return self.array_type
314
+
315
+
316
+ class NumpyNdIterType(IteratorType):
317
+ """
318
+ Type class for `np.nditer()` objects.
319
+
320
+ The layout denotes in which order the logical shape is iterated on.
321
+ "C" means logical order (corresponding to in-memory order in C arrays),
322
+ "F" means reverse logical order (corresponding to in-memory order in
323
+ F arrays).
324
+ """
325
+
326
+ def __init__(self, arrays):
327
+ # Note inputs arrays can also be scalars, in which case they are
328
+ # broadcast.
329
+ self.arrays = tuple(arrays)
330
+ self.layout = self._compute_layout(self.arrays)
331
+ self.dtypes = tuple(getattr(a, 'dtype', a) for a in self.arrays)
332
+ self.ndim = max(getattr(a, 'ndim', 0) for a in self.arrays)
333
+ name = "nditer(ndim={ndim}, layout={layout}, inputs={arrays})".format(
334
+ ndim=self.ndim, layout=self.layout, arrays=self.arrays)
335
+ super(NumpyNdIterType, self).__init__(name)
336
+
337
+ @classmethod
338
+ def _compute_layout(cls, arrays):
339
+ c = collections.Counter()
340
+ for a in arrays:
341
+ if not isinstance(a, Array):
342
+ continue
343
+ if a.layout in 'CF' and a.ndim == 1:
344
+ c['C'] += 1
345
+ c['F'] += 1
346
+ elif a.ndim >= 1:
347
+ c[a.layout] += 1
348
+ return 'F' if c['F'] > c['C'] else 'C'
349
+
350
+ @property
351
+ def key(self):
352
+ return self.arrays
353
+
354
+ @property
355
+ def views(self):
356
+ """
357
+ The views yielded by the iterator.
358
+ """
359
+ return [Array(dtype, 0, 'C') for dtype in self.dtypes]
360
+
361
+ @property
362
+ def yield_type(self):
363
+ from . import BaseTuple
364
+ views = self.views
365
+ if len(views) > 1:
366
+ return BaseTuple.from_types(views)
367
+ else:
368
+ return views[0]
369
+
370
+ @cached_property
371
+ def indexers(self):
372
+ """
373
+ A list of (kind, start_dim, end_dim, indices) where:
374
+ - `kind` is either "flat", "indexed", "0d" or "scalar"
375
+ - `start_dim` and `end_dim` are the dimension numbers at which
376
+ this indexing takes place
377
+ - `indices` is the indices of the indexed arrays in self.arrays
378
+ """
379
+ d = collections.OrderedDict()
380
+ layout = self.layout
381
+ ndim = self.ndim
382
+ assert layout in 'CF'
383
+ for i, a in enumerate(self.arrays):
384
+ if not isinstance(a, Array):
385
+ indexer = ('scalar', 0, 0)
386
+ elif a.ndim == 0:
387
+ indexer = ('0d', 0, 0)
388
+ else:
389
+ if a.layout == layout or (a.ndim == 1 and a.layout in 'CF'):
390
+ kind = 'flat'
391
+ else:
392
+ kind = 'indexed'
393
+ if layout == 'C':
394
+ # If iterating in C order, broadcasting is done on the outer indices
395
+ indexer = (kind, ndim - a.ndim, ndim)
396
+ else:
397
+ indexer = (kind, 0, a.ndim)
398
+ d.setdefault(indexer, []).append(i)
399
+ return list(k + (v,) for k, v in d.items())
400
+
401
+ @cached_property
402
+ def need_shaped_indexing(self):
403
+ """
404
+ Whether iterating on this iterator requires keeping track of
405
+ individual indices inside the shape. If False, only a single index
406
+ over the equivalent flat shape is required, which can make the
407
+ iterator more efficient.
408
+ """
409
+ for kind, start_dim, end_dim, _ in self.indexers:
410
+ if kind in ('0d', 'scalar'):
411
+ pass
412
+ elif kind == 'flat':
413
+ if (start_dim, end_dim) != (0, self.ndim):
414
+ # Broadcast flat iteration needs shaped indexing
415
+ # to know when to restart iteration.
416
+ return True
417
+ else:
418
+ return True
419
+ return False
420
+
421
+
422
+ class NumpyNdIndexType(SimpleIteratorType):
423
+ """
424
+ Type class for `np.ndindex()` objects.
425
+ """
426
+
427
+ def __init__(self, ndim):
428
+ from . import UniTuple, intp
429
+ self.ndim = ndim
430
+ yield_type = UniTuple(intp, self.ndim)
431
+ name = "ndindex(ndim={ndim})".format(ndim=ndim)
432
+ super(NumpyNdIndexType, self).__init__(name, yield_type)
433
+
434
+ @property
435
+ def key(self):
436
+ return self.ndim
437
+
438
+
439
+ class Array(Buffer):
440
+ """
441
+ Type class for Numpy arrays.
442
+ """
443
+
444
+ def __init__(self, dtype, ndim, layout, readonly=False, name=None,
445
+ aligned=True):
446
+ if readonly:
447
+ self.mutable = False
448
+ if (not aligned or
449
+ (isinstance(dtype, Record) and not dtype.aligned)):
450
+ self.aligned = False
451
+ if isinstance(dtype, NestedArray):
452
+ ndim += dtype.ndim
453
+ dtype = dtype.dtype
454
+ if name is None:
455
+ type_name = "array"
456
+ if not self.mutable:
457
+ type_name = "readonly " + type_name
458
+ if not self.aligned:
459
+ type_name = "unaligned " + type_name
460
+ name = "%s(%s, %sd, %s)" % (type_name, dtype, ndim, layout)
461
+ super(Array, self).__init__(dtype, ndim, layout, name=name)
462
+
463
+ @property
464
+ def mangling_args(self):
465
+ args = [self.dtype, self.ndim, self.layout,
466
+ 'mutable' if self.mutable else 'readonly',
467
+ 'aligned' if self.aligned else 'unaligned']
468
+ return self.__class__.__name__, args
469
+
470
+ def copy(self, dtype=None, ndim=None, layout=None, readonly=None):
471
+ if dtype is None:
472
+ dtype = self.dtype
473
+ if ndim is None:
474
+ ndim = self.ndim
475
+ if layout is None:
476
+ layout = self.layout
477
+ if readonly is None:
478
+ readonly = not self.mutable
479
+ return Array(dtype=dtype, ndim=ndim, layout=layout, readonly=readonly,
480
+ aligned=self.aligned)
481
+
482
+ @property
483
+ def key(self):
484
+ return self.dtype, self.ndim, self.layout, self.mutable, self.aligned
485
+
486
+ def unify(self, typingctx, other):
487
+ """
488
+ Unify this with the *other* Array.
489
+ """
490
+ # If other is array and the ndim matches
491
+ if isinstance(other, Array) and other.ndim == self.ndim:
492
+ # If dtype matches or other.dtype is undefined (inferred)
493
+ if other.dtype == self.dtype or not other.dtype.is_precise():
494
+ if self.layout == other.layout:
495
+ layout = self.layout
496
+ else:
497
+ layout = 'A'
498
+ readonly = not (self.mutable and other.mutable)
499
+ aligned = self.aligned and other.aligned
500
+ return Array(dtype=self.dtype, ndim=self.ndim, layout=layout,
501
+ readonly=readonly, aligned=aligned)
502
+
503
+ def can_convert_to(self, typingctx, other):
504
+ """
505
+ Convert this Array to the *other*.
506
+ """
507
+ if (isinstance(other, Array) and other.ndim == self.ndim
508
+ and other.dtype == self.dtype):
509
+ if (other.layout in ('A', self.layout)
510
+ and (self.mutable or not other.mutable)
511
+ and (self.aligned or not other.aligned)):
512
+ return Conversion.safe
513
+
514
+ def is_precise(self):
515
+ return self.dtype.is_precise()
516
+
517
+ @property
518
+ def box_type(self):
519
+ """Returns the Python type to box to.
520
+ """
521
+ return np.ndarray
522
+
523
+ def __repr__(self):
524
+ return (
525
+ f"Array({repr(self.dtype)}, {self.ndim}, '{self.layout}', "
526
+ f"{not self.mutable}, aligned={self.aligned})"
527
+ )
528
+
529
+ class ArrayCTypes(Type):
530
+ """
531
+ This is the type for `np.ndarray.ctypes`.
532
+ """
533
+ def __init__(self, arytype):
534
+ # This depends on the ndim for the shape and strides attributes,
535
+ # even though they are not implemented, yet.
536
+ self.dtype = arytype.dtype
537
+ self.ndim = arytype.ndim
538
+ name = "ArrayCTypes(dtype={0}, ndim={1})".format(self.dtype, self.ndim)
539
+ super(ArrayCTypes, self).__init__(name)
540
+
541
+ @property
542
+ def key(self):
543
+ return self.dtype, self.ndim
544
+
545
+ def can_convert_to(self, typingctx, other):
546
+ """
547
+ Convert this type to the corresponding pointer type.
548
+ This allows passing a array.ctypes object to a C function taking
549
+ a raw pointer.
550
+
551
+ Note that in pure Python, the array.ctypes object can only be
552
+ passed to a ctypes function accepting a c_void_p, not a typed
553
+ pointer.
554
+ """
555
+ from . import CPointer, voidptr
556
+ # XXX what about readonly
557
+ if isinstance(other, CPointer) and other.dtype == self.dtype:
558
+ return Conversion.safe
559
+ elif other == voidptr:
560
+ return Conversion.safe
561
+
562
+
563
+ class ArrayFlags(Type):
564
+ """
565
+ This is the type for `np.ndarray.flags`.
566
+ """
567
+ def __init__(self, arytype):
568
+ self.array_type = arytype
569
+ name = "ArrayFlags({0})".format(self.array_type)
570
+ super(ArrayFlags, self).__init__(name)
571
+
572
+ @property
573
+ def key(self):
574
+ return self.array_type
575
+
576
+
577
+ class NestedArray(Array):
578
+ """
579
+ A NestedArray is an array nested within a structured type (which are "void"
580
+ type in NumPy parlance). Unlike an Array, the shape, and not just the number
581
+ of dimensions is part of the type of a NestedArray.
582
+ """
583
+
584
+ def __init__(self, dtype, shape):
585
+ if isinstance(dtype, NestedArray):
586
+ tmp = Array(dtype.dtype, dtype.ndim, 'C')
587
+ shape += dtype.shape
588
+ dtype = tmp.dtype
589
+ assert dtype.bitwidth % 8 == 0, \
590
+ "Dtype bitwidth must be a multiple of bytes"
591
+ self._shape = shape
592
+ name = "nestedarray(%s, %s)" % (dtype, shape)
593
+ ndim = len(shape)
594
+ super(NestedArray, self).__init__(dtype, ndim, 'C', name=name)
595
+
596
+ @property
597
+ def shape(self):
598
+ return self._shape
599
+
600
+ @property
601
+ def nitems(self):
602
+ l = 1
603
+ for s in self.shape:
604
+ l = l * s
605
+ return l
606
+
607
+ @property
608
+ def size(self):
609
+ return self.dtype.bitwidth // 8
610
+
611
+ @property
612
+ def strides(self):
613
+ stride = self.size
614
+ strides = []
615
+ for i in reversed(self._shape):
616
+ strides.append(stride)
617
+ stride *= i
618
+ return tuple(reversed(strides))
619
+
620
+ @property
621
+ def key(self):
622
+ return self.dtype, self.shape
623
+
624
+ def __repr__(self):
625
+ return f"NestedArray({repr(self.dtype)}, {self.shape})"
626
+
627
+
628
+ class NumPyRandomBitGeneratorType(Type):
629
+ def __init__(self, *args, **kwargs):
630
+ super(NumPyRandomBitGeneratorType, self).__init__(*args, **kwargs)
631
+ self.name = 'NumPyRandomBitGeneratorType'
632
+
633
+
634
+ class NumPyRandomGeneratorType(Type):
635
+ def __init__(self, *args, **kwargs):
636
+ super(NumPyRandomGeneratorType, self).__init__(*args, **kwargs)
637
+ self.name = 'NumPyRandomGeneratorType'
638
+
639
+
640
+ class PolynomialType(Type):
641
+ def __init__(self, coef, domain=None, window=None, n_args=1):
642
+ super(PolynomialType, self).__init__(name=f'PolynomialType({coef}, {domain}, {domain}, {n_args})')
643
+ self.coef = coef
644
+ self.domain = domain
645
+ self.window = window
646
+ # We use n_args to keep track of the number of arguments in the
647
+ # constructor, since the types of domain and window arguments depend on
648
+ # that and we need that information when boxing
649
+ self.n_args = n_args
lib/python3.10/site-packages/numba/core/types/old_scalars.py ADDED
@@ -0,0 +1,270 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import enum
2
+
3
+ import numpy as np
4
+
5
+ from .abstract import Dummy, Hashable, Literal, Number, Type
6
+ from functools import total_ordering, cached_property
7
+ from numba.core import utils
8
+ from numba.core.typeconv import Conversion
9
+ from numba.np import npdatetime_helpers
10
+
11
+
12
+ class Boolean(Hashable):
13
+
14
+ def cast_python_value(self, value):
15
+ return bool(value)
16
+
17
+
18
+ def parse_integer_bitwidth(name):
19
+ for prefix in ('int', 'uint'):
20
+ if name.startswith(prefix):
21
+ bitwidth = int(name[len(prefix):])
22
+ return bitwidth
23
+
24
+
25
+ def parse_integer_signed(name):
26
+ signed = name.startswith('int')
27
+ return signed
28
+
29
+
30
+ @total_ordering
31
+ class Integer(Number):
32
+ def __init__(self, name, bitwidth=None, signed=None):
33
+ super(Integer, self).__init__(name)
34
+ if bitwidth is None:
35
+ bitwidth = parse_integer_bitwidth(name)
36
+ if signed is None:
37
+ signed = parse_integer_signed(name)
38
+ self.bitwidth = bitwidth
39
+ self.signed = signed
40
+
41
+ @classmethod
42
+ def from_bitwidth(cls, bitwidth, signed=True):
43
+ name = ('int%d' if signed else 'uint%d') % bitwidth
44
+ return cls(name)
45
+
46
+ def cast_python_value(self, value):
47
+ return getattr(np, self.name)(value)
48
+
49
+ def __lt__(self, other):
50
+ if self.__class__ is not other.__class__:
51
+ return NotImplemented
52
+ if self.signed != other.signed:
53
+ return NotImplemented
54
+ return self.bitwidth < other.bitwidth
55
+
56
+ @property
57
+ def maxval(self):
58
+ """
59
+ The maximum value representable by this type.
60
+ """
61
+ if self.signed:
62
+ return (1 << (self.bitwidth - 1)) - 1
63
+ else:
64
+ return (1 << self.bitwidth) - 1
65
+
66
+ @property
67
+ def minval(self):
68
+ """
69
+ The minimal value representable by this type.
70
+ """
71
+ if self.signed:
72
+ return -(1 << (self.bitwidth - 1))
73
+ else:
74
+ return 0
75
+
76
+
77
+ class IntegerLiteral(Literal, Integer):
78
+ def __init__(self, value):
79
+ self._literal_init(value)
80
+ name = 'Literal[int]({})'.format(value)
81
+ basetype = self.literal_type
82
+ Integer.__init__(
83
+ self,
84
+ name=name,
85
+ bitwidth=basetype.bitwidth,
86
+ signed=basetype.signed,
87
+ )
88
+
89
+ def can_convert_to(self, typingctx, other):
90
+ conv = typingctx.can_convert(self.literal_type, other)
91
+ if conv is not None:
92
+ return max(conv, Conversion.promote)
93
+
94
+
95
+ Literal.ctor_map[int] = IntegerLiteral
96
+
97
+
98
+ class BooleanLiteral(Literal, Boolean):
99
+
100
+ def __init__(self, value):
101
+ self._literal_init(value)
102
+ name = 'Literal[bool]({})'.format(value)
103
+ Boolean.__init__(
104
+ self,
105
+ name=name
106
+ )
107
+
108
+ def can_convert_to(self, typingctx, other):
109
+ conv = typingctx.can_convert(self.literal_type, other)
110
+ if conv is not None:
111
+ return max(conv, Conversion.promote)
112
+
113
+
114
+ Literal.ctor_map[bool] = BooleanLiteral
115
+
116
+
117
+ @total_ordering
118
+ class Float(Number):
119
+ def __init__(self, *args, **kws):
120
+ super(Float, self).__init__(*args, **kws)
121
+ # Determine bitwidth
122
+ assert self.name.startswith('float')
123
+ bitwidth = int(self.name[5:])
124
+ self.bitwidth = bitwidth
125
+
126
+ def cast_python_value(self, value):
127
+ return getattr(np, self.name)(value)
128
+
129
+ def __lt__(self, other):
130
+ if self.__class__ is not other.__class__:
131
+ return NotImplemented
132
+ return self.bitwidth < other.bitwidth
133
+
134
+
135
+ @total_ordering
136
+ class Complex(Number):
137
+ def __init__(self, name, underlying_float, **kwargs):
138
+ super(Complex, self).__init__(name, **kwargs)
139
+ self.underlying_float = underlying_float
140
+ # Determine bitwidth
141
+ assert self.name.startswith('complex')
142
+ bitwidth = int(self.name[7:])
143
+ self.bitwidth = bitwidth
144
+
145
+ def cast_python_value(self, value):
146
+ return getattr(np, self.name)(value)
147
+
148
+ def __lt__(self, other):
149
+ if self.__class__ is not other.__class__:
150
+ return NotImplemented
151
+ return self.bitwidth < other.bitwidth
152
+
153
+
154
+ class _NPDatetimeBase(Type):
155
+ """
156
+ Common base class for np.datetime64 and np.timedelta64.
157
+ """
158
+
159
+ def __init__(self, unit, *args, **kws):
160
+ name = '%s[%s]' % (self.type_name, unit)
161
+ self.unit = unit
162
+ self.unit_code = npdatetime_helpers.DATETIME_UNITS[self.unit]
163
+ super(_NPDatetimeBase, self).__init__(name, *args, **kws)
164
+
165
+ def __lt__(self, other):
166
+ if self.__class__ is not other.__class__:
167
+ return NotImplemented
168
+ # A coarser-grained unit is "smaller", i.e. less precise values
169
+ # can be represented (but the magnitude of representable values is
170
+ # also greater...).
171
+ return self.unit_code < other.unit_code
172
+
173
+ def cast_python_value(self, value):
174
+ cls = getattr(np, self.type_name)
175
+ if self.unit:
176
+ return cls(value, self.unit)
177
+ else:
178
+ return cls(value)
179
+
180
+
181
+ @total_ordering
182
+ class NPTimedelta(_NPDatetimeBase):
183
+ type_name = 'timedelta64'
184
+
185
+ @total_ordering
186
+ class NPDatetime(_NPDatetimeBase):
187
+ type_name = 'datetime64'
188
+
189
+
190
+ class EnumClass(Dummy):
191
+ """
192
+ Type class for Enum classes.
193
+ """
194
+ basename = "Enum class"
195
+
196
+ def __init__(self, cls, dtype):
197
+ assert isinstance(cls, type)
198
+ assert isinstance(dtype, Type)
199
+ self.instance_class = cls
200
+ self.dtype = dtype
201
+ name = "%s<%s>(%s)" % (self.basename, self.dtype, self.instance_class.__name__)
202
+ super(EnumClass, self).__init__(name)
203
+
204
+ @property
205
+ def key(self):
206
+ return self.instance_class, self.dtype
207
+
208
+ @cached_property
209
+ def member_type(self):
210
+ """
211
+ The type of this class' members.
212
+ """
213
+ return EnumMember(self.instance_class, self.dtype)
214
+
215
+
216
+ class IntEnumClass(EnumClass):
217
+ """
218
+ Type class for IntEnum classes.
219
+ """
220
+ basename = "IntEnum class"
221
+
222
+ @cached_property
223
+ def member_type(self):
224
+ """
225
+ The type of this class' members.
226
+ """
227
+ return IntEnumMember(self.instance_class, self.dtype)
228
+
229
+
230
+ class EnumMember(Type):
231
+ """
232
+ Type class for Enum members.
233
+ """
234
+ basename = "Enum"
235
+ class_type_class = EnumClass
236
+
237
+ def __init__(self, cls, dtype):
238
+ assert isinstance(cls, type)
239
+ assert isinstance(dtype, Type)
240
+ self.instance_class = cls
241
+ self.dtype = dtype
242
+ name = "%s<%s>(%s)" % (self.basename, self.dtype, self.instance_class.__name__)
243
+ super(EnumMember, self).__init__(name)
244
+
245
+ @property
246
+ def key(self):
247
+ return self.instance_class, self.dtype
248
+
249
+ @property
250
+ def class_type(self):
251
+ """
252
+ The type of this member's class.
253
+ """
254
+ return self.class_type_class(self.instance_class, self.dtype)
255
+
256
+
257
+ class IntEnumMember(EnumMember):
258
+ """
259
+ Type class for IntEnum members.
260
+ """
261
+ basename = "IntEnum"
262
+ class_type_class = IntEnumClass
263
+
264
+ def can_convert_to(self, typingctx, other):
265
+ """
266
+ Convert IntEnum members to plain integers.
267
+ """
268
+ if issubclass(self.instance_class, enum.IntEnum):
269
+ conv = typingctx.can_convert(self.dtype, other)
270
+ return max(conv, Conversion.safe)
lib/python3.10/site-packages/numba/core/types/scalars.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ from numba.core.utils import _RedirectSubpackage
3
+ from numba.core import config
4
+
5
+ if config.USE_LEGACY_TYPE_SYSTEM: # type: ignore
6
+ sys.modules[__name__] = _RedirectSubpackage(
7
+ locals(), "numba.core.types.old_scalars"
8
+ )
9
+ else:
10
+ sys.modules[__name__] = _RedirectSubpackage(
11
+ locals(), "numba.core.types.new_scalars"
12
+ )
lib/python3.10/site-packages/numba/core/typing/__init__.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ from .context import BaseContext, Context
2
+ from .templates import (signature, make_concrete_template, Signature,
3
+ fold_arguments)
lib/python3.10/site-packages/numba/core/typing/arraydecl.py ADDED
@@ -0,0 +1,880 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import operator
3
+ from collections import namedtuple
4
+
5
+ from numba.core import types, utils
6
+ from numba.core.typing.templates import (AttributeTemplate, AbstractTemplate,
7
+ infer, infer_global, infer_getattr,
8
+ signature, bound_function)
9
+ # import time side effect: array operations requires typing support of sequence
10
+ # defined in collections: e.g. array.shape[i]
11
+ from numba.core.typing import collections
12
+ from numba.core.errors import (TypingError, RequireLiteralValue, NumbaTypeError,
13
+ NumbaNotImplementedError, NumbaAssertionError,
14
+ NumbaKeyError, NumbaIndexError, NumbaValueError)
15
+ from numba.core.cgutils import is_nonelike
16
+
17
+ numpy_version = tuple(map(int, np.__version__.split('.')[:2]))
18
+
19
+
20
+ Indexing = namedtuple("Indexing", ("index", "result", "advanced"))
21
+
22
+
23
+ def get_array_index_type(ary, idx):
24
+ """
25
+ Returns None or a tuple-3 for the types of the input array, index, and
26
+ resulting type of ``array[index]``.
27
+
28
+ Note: This is shared logic for ndarray getitem and setitem.
29
+ """
30
+ if not isinstance(ary, types.Buffer):
31
+ return
32
+
33
+ ndim = ary.ndim
34
+
35
+ left_indices = []
36
+ right_indices = []
37
+ ellipsis_met = False
38
+ advanced = False
39
+ num_newaxis = 0
40
+
41
+ if not isinstance(idx, types.BaseTuple):
42
+ idx = [idx]
43
+
44
+ # Here, a subspace is considered as a contiguous group of advanced indices.
45
+ # num_subspaces keeps track of the number of such
46
+ # contiguous groups.
47
+ in_subspace = False
48
+ num_subspaces = 0
49
+ array_indices = 0
50
+
51
+ # Walk indices
52
+ for ty in idx:
53
+ if ty is types.ellipsis:
54
+ if ellipsis_met:
55
+ raise NumbaTypeError(
56
+ "Only one ellipsis allowed in array indices "
57
+ "(got %s)" % (idx,))
58
+ ellipsis_met = True
59
+ in_subspace = False
60
+ elif isinstance(ty, types.SliceType):
61
+ # If we encounter a non-advanced index while in a
62
+ # subspace then that subspace ends.
63
+ in_subspace = False
64
+ # In advanced indexing, any index broadcastable to an
65
+ # array is considered an advanced index. Hence all the
66
+ # branches below are considered as advanced indices.
67
+ elif isinstance(ty, types.Integer):
68
+ # Normalize integer index
69
+ ty = types.intp if ty.signed else types.uintp
70
+ # Integer indexing removes the given dimension
71
+ ndim -= 1
72
+ # If we're within a subspace/contiguous group of
73
+ # advanced indices then no action is necessary
74
+ # since we've already counted that subspace once.
75
+ if not in_subspace:
76
+ # If we're not within a subspace and we encounter
77
+ # this branch then we have a new subspace/group.
78
+ num_subspaces += 1
79
+ in_subspace = True
80
+ elif (isinstance(ty, types.Array) and ty.ndim == 0
81
+ and isinstance(ty.dtype, types.Integer)):
82
+ # 0-d array used as integer index
83
+ ndim -= 1
84
+ if not in_subspace:
85
+ num_subspaces += 1
86
+ in_subspace = True
87
+ elif (isinstance(ty, types.Array)
88
+ and isinstance(ty.dtype, (types.Integer, types.Boolean))):
89
+ if ty.ndim > 1:
90
+ # Advanced indexing limitation # 1
91
+ raise NumbaTypeError(
92
+ "Multi-dimensional indices are not supported.")
93
+ array_indices += 1
94
+ # The condition for activating advanced indexing is simply
95
+ # having at least one array with size > 1.
96
+ advanced = True
97
+ if not in_subspace:
98
+ num_subspaces += 1
99
+ in_subspace = True
100
+ elif (is_nonelike(ty)):
101
+ ndim += 1
102
+ num_newaxis += 1
103
+ else:
104
+ raise NumbaTypeError("Unsupported array index type %s in %s"
105
+ % (ty, idx))
106
+ (right_indices if ellipsis_met else left_indices).append(ty)
107
+
108
+ if advanced:
109
+ if array_indices > 1:
110
+ # Advanced indexing limitation # 2
111
+ msg = "Using more than one non-scalar array index is unsupported."
112
+ raise NumbaTypeError(msg)
113
+
114
+ if num_subspaces > 1:
115
+ # Advanced indexing limitation # 3
116
+ msg = ("Using more than one indexing subspace is unsupported."
117
+ " An indexing subspace is a group of one or more"
118
+ " consecutive indices comprising integer or array types.")
119
+ raise NumbaTypeError(msg)
120
+
121
+ # Only Numpy arrays support advanced indexing
122
+ if advanced and not isinstance(ary, types.Array):
123
+ return
124
+
125
+ # Check indices and result dimensionality
126
+ all_indices = left_indices + right_indices
127
+ if ellipsis_met:
128
+ assert right_indices[0] is types.ellipsis
129
+ del right_indices[0]
130
+
131
+ n_indices = len(all_indices) - ellipsis_met - num_newaxis
132
+ if n_indices > ary.ndim:
133
+ raise NumbaTypeError("cannot index %s with %d indices: %s"
134
+ % (ary, n_indices, idx))
135
+ if n_indices == ary.ndim and ndim == 0 and not ellipsis_met:
136
+ # Full integer indexing => scalar result
137
+ # (note if ellipsis is present, a 0-d view is returned instead)
138
+ res = ary.dtype
139
+
140
+ elif advanced:
141
+ # Result is a copy
142
+ res = ary.copy(ndim=ndim, layout='C', readonly=False)
143
+
144
+ else:
145
+ # Result is a view
146
+ if ary.slice_is_copy:
147
+ # Avoid view semantics when the original type creates a copy
148
+ # when slicing.
149
+ return
150
+
151
+ # Infer layout
152
+ layout = ary.layout
153
+
154
+ def keeps_contiguity(ty, is_innermost):
155
+ # A slice can only keep an array contiguous if it is the
156
+ # innermost index and it is not strided
157
+ return (ty is types.ellipsis or isinstance(ty, types.Integer)
158
+ or (is_innermost and isinstance(ty, types.SliceType)
159
+ and not ty.has_step))
160
+
161
+ def check_contiguity(outer_indices):
162
+ """
163
+ Whether indexing with the given indices (from outer to inner in
164
+ physical layout order) can keep an array contiguous.
165
+ """
166
+ for ty in outer_indices[:-1]:
167
+ if not keeps_contiguity(ty, False):
168
+ return False
169
+ if outer_indices and not keeps_contiguity(outer_indices[-1], True):
170
+ return False
171
+ return True
172
+
173
+ if layout == 'C':
174
+ # Integer indexing on the left keeps the array C-contiguous
175
+ if n_indices == ary.ndim:
176
+ # If all indices are there, ellipsis's place is indifferent
177
+ left_indices = left_indices + right_indices
178
+ right_indices = []
179
+ if right_indices:
180
+ layout = 'A'
181
+ elif not check_contiguity(left_indices):
182
+ layout = 'A'
183
+ elif layout == 'F':
184
+ # Integer indexing on the right keeps the array F-contiguous
185
+ if n_indices == ary.ndim:
186
+ # If all indices are there, ellipsis's place is indifferent
187
+ right_indices = left_indices + right_indices
188
+ left_indices = []
189
+ if left_indices:
190
+ layout = 'A'
191
+ elif not check_contiguity(right_indices[::-1]):
192
+ layout = 'A'
193
+
194
+ if ndim == 0:
195
+ # Implicitly convert to a scalar if the output ndim==0
196
+ res = ary.dtype
197
+ else:
198
+ res = ary.copy(ndim=ndim, layout=layout)
199
+
200
+ # Re-wrap indices
201
+ if isinstance(idx, types.BaseTuple):
202
+ idx = types.BaseTuple.from_types(all_indices)
203
+ else:
204
+ idx, = all_indices
205
+
206
+ return Indexing(idx, res, advanced)
207
+
208
+
209
+ @infer_global(operator.getitem)
210
+ class GetItemBuffer(AbstractTemplate):
211
+ def generic(self, args, kws):
212
+ assert not kws
213
+ [ary, idx] = args
214
+ out = get_array_index_type(ary, idx)
215
+ if out is not None:
216
+ return signature(out.result, ary, out.index)
217
+
218
+
219
@infer_global(operator.setitem)
class SetItemBuffer(AbstractTemplate):
    """Typing for ``buffer[index] = value`` (``operator.setitem``).

    Returns a signature when the assignment is typeable, or None so other
    templates can be tried.  Raises NumbaTypeError for readonly buffers.
    """

    def generic(self, args, kws):
        assert not kws
        ary, idx, val = args
        if not isinstance(ary, types.Buffer):
            return
        if not ary.mutable:
            msg = f"Cannot modify readonly array of type: {ary}"
            raise NumbaTypeError(msg)
        out = get_array_index_type(ary, idx)
        if out is None:
            return

        idx = out.index
        res = out.result # res is the result type of the access ary[idx]
        if isinstance(res, types.Array):
            # Indexing produces an array (slice / fancy index on the LHS)
            if isinstance(val, types.Array):
                if not self.context.can_convert(val.dtype, res.dtype):
                    # DType conversion not possible
                    return
                else:
                    res = val
            elif isinstance(val, types.Sequence):
                if (res.ndim == 1 and
                    self.context.can_convert(val.dtype, res.dtype)):
                    # Allow assignment of sequence to 1d array
                    res = val
                else:
                    # NOTE: sequence-to-array broadcasting is unsupported
                    return
            else:
                # Allow scalar broadcasting
                if self.context.can_convert(val, res.dtype):
                    res = res.dtype
                else:
                    # Incompatible scalar type
                    return
        elif not isinstance(val, types.Array):
            # Single item assignment
            if not self.context.can_convert(val, res):
                # if the array dtype is not yet defined
                if not res.is_precise():
                    # set the array type to use the dtype of value (RHS)
                    newary = ary.copy(dtype=val)
                    return signature(types.none, newary, idx, res)
                else:
                    return
            res = val
        elif (isinstance(val, types.Array) and val.ndim == 0
              and self.context.can_convert(val.dtype, res)):
            # val is an array(T, 0d, O), where T is the type of res, O is order
            res = val
        else:
            return
        return signature(types.none, ary, idx, res)
276
+
277
+
278
def normalize_shape(shape):
    """Normalize a shape-tuple type to a uniform tuple of machine ints.

    Returns a ``types.UniTuple`` of intp/uintp, or None when *shape* is not
    a recognized shape type.
    """
    if isinstance(shape, types.UniTuple):
        if isinstance(shape.dtype, types.Integer):
            # Preserve signedness when widening to machine width.
            if shape.dtype.signed:
                dimtype = types.intp
            else:
                dimtype = types.uintp
            return types.UniTuple(dimtype, len(shape))
    elif isinstance(shape, types.Tuple) and shape.count == 0:
        # Force (0 x intp) for consistency with other shapes
        return types.UniTuple(types.intp, 0)
    return None
287
+
288
+
289
@infer_getattr
class ArrayAttribute(AttributeTemplate):
    """Attribute and bound-method typing for ``types.Array``.

    Each ``resolve_<attr>`` returns the Numba type of ``ary.<attr>``;
    ``@bound_function`` methods return call signatures for array methods.
    """
    key = types.Array

    def resolve_dtype(self, ary):
        return types.DType(ary.dtype)

    def resolve_nbytes(self, ary):
        return types.intp

    def resolve_itemsize(self, ary):
        return types.intp

    def resolve_shape(self, ary):
        return types.UniTuple(types.intp, ary.ndim)

    def resolve_strides(self, ary):
        return types.UniTuple(types.intp, ary.ndim)

    def resolve_ndim(self, ary):
        return types.intp

    def resolve_size(self, ary):
        return types.intp

    def resolve_flat(self, ary):
        return types.NumpyFlatType(ary)

    def resolve_ctypes(self, ary):
        return types.ArrayCTypes(ary)

    def resolve_flags(self, ary):
        return types.ArrayFlags(ary)

    def resolve_T(self, ary):
        # Transposing swaps C <-> F contiguity; anything else becomes 'A'.
        if ary.ndim <= 1:
            retty = ary
        else:
            layout = {"C": "F", "F": "C"}.get(ary.layout, "A")
            retty = ary.copy(layout=layout)
        return retty

    def resolve_real(self, ary):
        return self._resolve_real_imag(ary, attr='real')

    def resolve_imag(self, ary):
        return self._resolve_real_imag(ary, attr='imag')

    def _resolve_real_imag(self, ary, attr):
        if ary.dtype in types.complex_domain:
            return ary.copy(dtype=ary.dtype.underlying_float, layout='A')
        elif ary.dtype in types.number_domain:
            res = ary.copy(dtype=ary.dtype)
            if attr == 'imag':
                # .imag of a real array is all-zeros and not writable
                res = res.copy(readonly=True)
            return res
        else:
            msg = "cannot access .{} of array of {}"
            raise TypingError(msg.format(attr, ary.dtype))

    @bound_function("array.transpose")
    def resolve_transpose(self, ary, args, kws):
        def sentry_shape_scalar(ty):
            if ty in types.number_domain:
                # Guard against non integer type
                if not isinstance(ty, types.Integer):
                    msg = "transpose() arg cannot be {0}".format(ty)
                    raise TypingError(msg)
                return True
            else:
                return False

        assert not kws
        if len(args) == 0:
            return signature(self.resolve_T(ary))

        if len(args) == 1:
            shape, = args

            if sentry_shape_scalar(shape):
                assert ary.ndim == 1
                return signature(ary, *args)

            if isinstance(shape, types.NoneType):
                return signature(self.resolve_T(ary))

            shape = normalize_shape(shape)
            if shape is None:
                return

            assert ary.ndim == shape.count
            return signature(self.resolve_T(ary).copy(layout="A"), shape)

        else:
            if any(not sentry_shape_scalar(a) for a in args):
                # BUGFIX: args holds Numba types, not strings; join them via
                # str() (consistent with resolve_reshape below), otherwise
                # building the message itself raises TypeError.
                msg = "transpose({0}) is not supported".format(
                    ', '.join(map(str, args)))
                raise TypingError(msg)
            assert ary.ndim == len(args)
            return signature(self.resolve_T(ary).copy(layout="A"), *args)

    @bound_function("array.copy")
    def resolve_copy(self, ary, args, kws):
        assert not args
        assert not kws
        # A copy is always C-contiguous and writable.
        retty = ary.copy(layout="C", readonly=False)
        return signature(retty)

    @bound_function("array.item")
    def resolve_item(self, ary, args, kws):
        assert not kws
        # We don't support explicit arguments as that's exactly equivalent
        # to regular indexing. The no-argument form is interesting to
        # allow some degree of genericity when writing functions.
        if not args:
            return signature(ary.dtype)

    if numpy_version < (2, 0):
        # ndarray.itemset was removed in NumPy 2.0
        @bound_function("array.itemset")
        def resolve_itemset(self, ary, args, kws):
            assert not kws
            # We don't support explicit arguments as that's exactly equivalent
            # to regular indexing. The no-argument form is interesting to
            # allow some degree of genericity when writing functions.
            if len(args) == 1:
                return signature(types.none, ary.dtype)

    @bound_function("array.nonzero")
    def resolve_nonzero(self, ary, args, kws):
        assert not args
        assert not kws
        if ary.ndim == 0 and numpy_version >= (2, 1):
            raise NumbaValueError(
                "Calling nonzero on 0d arrays is not allowed."
                " Use np.atleast_1d(scalar).nonzero() instead."
            )
        # 0-dim arrays return one result array
        ndim = max(ary.ndim, 1)
        retty = types.UniTuple(types.Array(types.intp, 1, 'C'), ndim)
        return signature(retty)

    @bound_function("array.reshape")
    def resolve_reshape(self, ary, args, kws):
        def sentry_shape_scalar(ty):
            if ty in types.number_domain:
                # Guard against non integer type
                if not isinstance(ty, types.Integer):
                    raise TypingError("reshape() arg cannot be {0}".format(ty))
                return True
            else:
                return False

        assert not kws
        if ary.layout not in 'CF':
            # only work for contiguous array
            raise TypingError("reshape() supports contiguous array only")

        if len(args) == 1:
            # single arg
            shape, = args

            if sentry_shape_scalar(shape):
                ndim = 1
            else:
                shape = normalize_shape(shape)
                if shape is None:
                    return
                ndim = shape.count
            retty = ary.copy(ndim=ndim)
            return signature(retty, shape)

        elif len(args) == 0:
            # no arg
            raise TypingError("reshape() take at least one arg")

        else:
            # vararg case
            if any(not sentry_shape_scalar(a) for a in args):
                raise TypingError("reshape({0}) is not supported".format(
                    ', '.join(map(str, args))))

            retty = ary.copy(ndim=len(args))
            return signature(retty, *args)

    @bound_function("array.sort")
    def resolve_sort(self, ary, args, kws):
        assert not args
        assert not kws
        # In-place sort; returns None like NumPy.
        return signature(types.none)

    @bound_function("array.argsort")
    def resolve_argsort(self, ary, args, kws):
        assert not args
        kwargs = dict(kws)
        kind = kwargs.pop('kind', types.StringLiteral('quicksort'))
        if not isinstance(kind, types.StringLiteral):
            raise TypingError('"kind" must be a string literal')
        if kwargs:
            msg = "Unsupported keywords: {!r}"
            raise TypingError(msg.format([k for k in kwargs.keys()]))
        if ary.ndim == 1:
            def argsort_stub(kind='quicksort'):
                pass
            pysig = utils.pysignature(argsort_stub)
            sig = signature(types.Array(types.intp, 1, 'C'), kind).replace(pysig=pysig)
            return sig

    @bound_function("array.view")
    def resolve_view(self, ary, args, kws):
        from .npydecl import parse_dtype
        assert not kws
        dtype, = args
        dtype = parse_dtype(dtype)
        if dtype is None:
            return
        retty = ary.copy(dtype=dtype)
        return signature(retty, *args)

    @bound_function("array.astype")
    def resolve_astype(self, ary, args, kws):
        from .npydecl import parse_dtype
        assert not kws
        dtype, = args
        if isinstance(dtype, types.UnicodeType):
            raise RequireLiteralValue(("array.astype if dtype is a string it "
                                       "must be constant"))
        dtype = parse_dtype(dtype)
        if dtype is None:
            return
        if not self.context.can_convert(ary.dtype, dtype):
            raise TypingError("astype(%s) not supported on %s: "
                              "cannot convert from %s to %s"
                              % (dtype, ary, ary.dtype, dtype))
        layout = ary.layout if ary.layout in 'CF' else 'C'
        # reset the write bit irrespective of whether the cast type is the same
        # as the current dtype, this replicates numpy
        retty = ary.copy(dtype=dtype, layout=layout, readonly=False)
        return signature(retty, *args)

    @bound_function("array.ravel")
    def resolve_ravel(self, ary, args, kws):
        # Only support no argument version (default order='C')
        assert not kws
        assert not args
        # ravel only copies when the source isn't already C-contiguous,
        # so readonly-ness is preserved in the no-copy case.
        copy_will_be_made = ary.layout != 'C'
        readonly = not (copy_will_be_made or ary.mutable)
        return signature(ary.copy(ndim=1, layout='C', readonly=readonly))

    @bound_function("array.flatten")
    def resolve_flatten(self, ary, args, kws):
        # Only support no argument version (default order='C')
        assert not kws
        assert not args
        # To ensure that Numba behaves exactly like NumPy,
        # we also clear the read-only flag when doing a "flatten"
        # Why? Two reasons:
        # Because flatten always returns a copy. (see NumPy docs for "flatten")
        # And because a copy always returns a writeable array.
        # ref: https://numpy.org/doc/stable/reference/generated/numpy.copy.html
        return signature(ary.copy(ndim=1, layout='C', readonly=False))

    def generic_resolve(self, ary, attr):
        # Resolution of other attributes, for record arrays
        if isinstance(ary.dtype, types.Record):
            if attr in ary.dtype.fields:
                attr_dtype = ary.dtype.typeof(attr)
                if isinstance(attr_dtype, types.NestedArray):
                    return ary.copy(
                        dtype=attr_dtype.dtype,
                        ndim=ary.ndim + attr_dtype.ndim,
                        layout='A'
                    )
                else:
                    return ary.copy(dtype=attr_dtype, layout='A')
563
+
564
+
565
@infer_getattr
class DTypeAttr(AttributeTemplate):
    """Attribute typing for ``types.DType`` instances."""
    key = types.DType

    def resolve_type(self, ary):
        # Wrap the numeric type in NumberClass
        return types.NumberClass(ary.dtype)

    def resolve_kind(self, ary):
        """Return the dtype 'kind' character as a string literal, or None."""
        key = ary.key
        if isinstance(key, types.scalars.Float):
            kind_char = 'f'
        elif isinstance(key, types.scalars.Integer):
            kind_char = 'i'
        else:
            # other types not supported yet
            return None
        return types.StringLiteral(kind_char)
581
+
582
+
583
@infer
class StaticGetItemArray(AbstractTemplate):
    """Typing of ``array["field"]`` for record/structured arrays."""
    key = "static_getitem"

    def generic(self, args, kws):
        # Resolution of members for record and structured arrays
        ary, idx = args
        if not (isinstance(ary, types.Array)
                and isinstance(idx, str)
                and isinstance(ary.dtype, types.Record)):
            return None
        if idx not in ary.dtype.fields:
            return None
        field_type = ary.dtype.typeof(idx)
        if isinstance(field_type, types.NestedArray):
            # A nested-array field contributes its own dimensions.
            ret = ary.copy(dtype=field_type.dtype,
                           ndim=ary.ndim + field_type.ndim,
                           layout='A')
        else:
            ret = ary.copy(dtype=field_type, layout='A')
        return signature(ret, *args)
604
+
605
+
606
@infer_getattr
class RecordAttribute(AttributeTemplate):
    """Attribute access on a record scalar resolves to the field's type."""
    key = types.Record

    def generic_resolve(self, record, attr):
        field_type = record.typeof(attr)
        assert field_type
        return field_type
614
+
615
+
616
@infer
class StaticGetItemRecord(AbstractTemplate):
    """Typing of ``record["field"]`` with a constant string index."""
    key = "static_getitem"

    def generic(self, args, kws):
        # Resolution of members for records
        record, idx = args
        if not (isinstance(record, types.Record) and isinstance(idx, str)):
            return None
        if idx not in record.fields:
            raise NumbaKeyError(f"Field '{idx}' was not found in record "
                                "with fields "
                                f"{tuple(record.fields.keys())}")
        ret = record.typeof(idx)
        assert ret
        return signature(ret, *args)
631
+
632
+
633
@infer_global(operator.getitem)
class StaticGetItemLiteralRecord(AbstractTemplate):
    """Typing of ``record[idx]`` where *idx* is a literal string (field
    name) or a literal integer (field position)."""

    def generic(self, args, kws):
        # Resolution of members for records
        record, idx = args
        if isinstance(record, types.Record):
            if isinstance(idx, types.StringLiteral):
                if idx.literal_value not in record.fields:
                    msg = (f"Field '{idx.literal_value}' was not found in "
                           f"record with fields {tuple(record.fields.keys())}")
                    raise NumbaKeyError(msg)
                ret = record.typeof(idx.literal_value)
                assert ret
                return signature(ret, *args)
            elif isinstance(idx, types.IntegerLiteral):
                # NOTE(review): only the upper bound is checked here —
                # presumably negative literals are rejected elsewhere; confirm.
                if idx.literal_value >= len(record.fields):
                    msg = f"Requested index {idx.literal_value} is out of range"
                    raise NumbaIndexError(msg)
                field_names = list(record.fields)
                ret = record.typeof(field_names[idx.literal_value])
                assert ret
                return signature(ret, *args)
655
+
656
+
657
@infer
class StaticSetItemRecord(AbstractTemplate):
    """Typing of ``record[idx] = value`` where *idx* is a compile-time
    constant string (field name) or int (field position)."""
    key = "static_setitem"

    def generic(self, args, kws):
        # Resolution of members for record and structured arrays
        record, idx, value = args
        if isinstance(record, types.Record):
            if isinstance(idx, str):
                expectedty = record.typeof(idx)
                if self.context.can_convert(value, expectedty) is not None:
                    return signature(types.void, record, types.literal(idx),
                                     value)
            elif isinstance(idx, int):
                # NOTE(review): only the upper bound is checked here —
                # presumably negative indices are rejected elsewhere; confirm.
                if idx >= len(record.fields):
                    msg = f"Requested index {idx} is out of range"
                    raise NumbaIndexError(msg)
                str_field = list(record.fields)[idx]
                expectedty = record.typeof(str_field)
                if self.context.can_convert(value, expectedty) is not None:
                    return signature(types.void, record, types.literal(idx),
                                     value)
679
+
680
+
681
@infer_global(operator.setitem)
class StaticSetItemLiteralRecord(AbstractTemplate):
    """Typing of ``record[field_literal] = value`` for string literals."""

    def generic(self, args, kws):
        # Resolution of members for records
        target, idx, value = args
        if not (isinstance(target, types.Record)
                and isinstance(idx, types.StringLiteral)):
            return None
        field = idx.literal_value
        if field not in target.fields:
            msg = (f"Field '{field}' was not found in record "
                   f"with fields {tuple(target.fields.keys())}")
            raise NumbaKeyError(msg)
        expectedty = target.typeof(field)
        if self.context.can_convert(value, expectedty) is not None:
            return signature(types.void, target, idx, value)
        return None
694
+
695
+
696
@infer_getattr
class ArrayCTypesAttribute(AttributeTemplate):
    """Attributes of the ``array.ctypes`` helper object."""
    key = types.ArrayCTypes

    def resolve_data(self, ctinfo):
        # Raw data pointer, exposed as an unsigned machine-width integer.
        return types.uintp
702
+
703
+
704
@infer_getattr
class ArrayFlagsAttribute(AttributeTemplate):
    """Attribute typing for the ``array.flags`` object; every supported
    flag resolves to a boolean."""
    key = types.ArrayFlags

    def resolve_contiguous(self, ctflags):
        return types.boolean

    def resolve_c_contiguous(self, ctflags):
        return types.boolean

    def resolve_f_contiguous(self, ctflags):
        return types.boolean
716
+
717
+
718
@infer_getattr
class NestedArrayAttribute(ArrayAttribute):
    # NestedArray reuses all of ArrayAttribute's resolutions unchanged.
    key = types.NestedArray
721
+
722
+
723
def _expand_integer(ty):
    """
    If *ty* is an integer, expand it to a machine int (like Numpy).
    """
    if isinstance(ty, types.Integer):
        # Widen to at least machine width, preserving signedness.
        baseline = types.intp if ty.signed else types.uintp
        return max(baseline, ty)
    if isinstance(ty, types.Boolean):
        return types.intp
    return ty
736
+
737
+
738
def generic_homog(self, args, kws):
    """Type a no-argument array method whose result is the array's dtype."""
    if args:
        raise NumbaAssertionError("args not supported")
    if kws:
        raise NumbaAssertionError("kws not supported")

    return signature(self.this.dtype, recvr=self.this)
745
+
746
+
747
def generic_expand(self, args, kws):
    """Type a no-argument reduction whose result dtype is widened to a
    machine int for integer/boolean arrays (see ``_expand_integer``)."""
    assert not args
    assert not kws
    return signature(_expand_integer(self.this.dtype), recvr=self.this)
751
+
752
+
753
def sum_expand(self, args, kws):
    """
    sum can be called with or without an axis parameter, and with or without
    a dtype parameter.

    Keyword arguments are folded into positional *args* (with a matching
    pysig stub) so that the four arg/kwarg combinations share one code path.
    """
    pysig = None
    if 'axis' in kws and 'dtype' not in kws:
        def sum_stub(axis):
            pass
        pysig = utils.pysignature(sum_stub)
        # rewrite args
        args = list(args) + [kws['axis']]
    elif 'dtype' in kws and 'axis' not in kws:
        def sum_stub(dtype):
            pass
        pysig = utils.pysignature(sum_stub)
        # rewrite args
        args = list(args) + [kws['dtype']]
    elif 'dtype' in kws and 'axis' in kws:
        def sum_stub(axis, dtype):
            pass
        pysig = utils.pysignature(sum_stub)
        # rewrite args
        args = list(args) + [kws['axis'], kws['dtype']]

    args_len = len(args)
    assert args_len <= 2
    if args_len == 0:
        # No axis or dtype parameter so the return type of the summation is a scalar
        # of the type of the array.
        out = signature(_expand_integer(self.this.dtype), *args,
                        recvr=self.this)
    elif args_len == 1 and 'dtype' not in kws:
        # There is an axis parameter, either arg or kwarg
        if self.this.ndim == 1:
            # 1d reduces to a scalar
            return_type = _expand_integer(self.this.dtype)
        else:
            # the return type of this summation is an array of dimension one
            # less than the input array.
            return_type = types.Array(dtype=_expand_integer(self.this.dtype),
                                      ndim=self.this.ndim-1, layout='C')
        out = signature(return_type, *args, recvr=self.this)

    elif args_len == 1 and 'dtype' in kws:
        # No axis parameter so the return type of the summation is a scalar
        # of the dtype parameter.
        from .npydecl import parse_dtype
        dtype, = args
        dtype = parse_dtype(dtype)
        out = signature(dtype, *args, recvr=self.this)

    elif args_len == 2:
        # There is an axis and dtype parameter, either arg or kwarg
        from .npydecl import parse_dtype
        dtype = parse_dtype(args[1])
        return_type = dtype
        if self.this.ndim != 1:
            # 1d reduces to a scalar, 2d and above reduce dim by 1
            # the return type of this summation is an array of dimension one
            # less than the input array.
            return_type = types.Array(dtype=return_type,
                                      ndim=self.this.ndim-1, layout='C')
        out = signature(return_type, *args, recvr=self.this)
    else:
        # NOTE(review): unreachable given the assert above; if it were
        # reached, `out` would be unbound below — confirm intent.
        pass
    return out.replace(pysig=pysig)
820
+
821
+
822
def generic_expand_cumulative(self, args, kws):
    """Type a no-argument cumulative reduction (e.g. ``cumsum``): result is
    a 1d C-contiguous array with a machine-width accumulator dtype."""
    if args:
        raise NumbaAssertionError("args unsupported")
    if kws:
        raise NumbaAssertionError("kwargs unsupported")
    assert isinstance(self.this, types.Array)
    return_type = types.Array(dtype=_expand_integer(self.this.dtype),
                              ndim=1, layout='C')
    return signature(return_type, recvr=self.this)
831
+
832
+
833
def generic_hetero_real(self, args, kws):
    """Type a no-argument method where int/bool arrays produce float64
    (e.g. ``mean``) and other dtypes are kept as-is."""
    assert not args
    assert not kws
    dtype = self.this.dtype
    if isinstance(dtype, (types.Integer, types.Boolean)):
        return signature(types.float64, recvr=self.this)
    return signature(dtype, recvr=self.this)
839
+
840
+
841
def generic_hetero_always_real(self, args, kws):
    """Type a no-argument method with an always-real result: int/bool maps
    to float64, complex maps to its underlying float, others unchanged."""
    assert not args
    assert not kws
    dtype = self.this.dtype
    if isinstance(dtype, (types.Integer, types.Boolean)):
        return signature(types.float64, recvr=self.this)
    if isinstance(dtype, types.Complex):
        return signature(dtype.underlying_float, recvr=self.this)
    return signature(dtype, recvr=self.this)
849
+
850
+
851
def generic_index(self, args, kws):
    """Type a no-argument method returning an index (``intp``)."""
    assert not args
    assert not kws
    return signature(types.intp, recvr=self.this)
855
+
856
+
857
def install_array_method(name, generic, prefer_literal=True):
    """Build an ``array.<name>`` bound-method template from *generic* and
    attach it to ``ArrayAttribute`` as ``resolve_<name>``."""
    class_dict = {"key": "array." + name,
                  "generic": generic,
                  "prefer_literal": prefer_literal}
    temp_class = type("Array_" + name, (AbstractTemplate,), class_dict)

    def array_attribute_attachment(self, ary):
        return types.BoundFunction(temp_class, ary)

    setattr(ArrayAttribute, "resolve_" + name, array_attribute_attachment)
866
+
867
+
868
# Functions that return a machine-width type, to avoid overflows
install_array_method("sum", sum_expand, prefer_literal=True)
870
+
871
+
872
@infer_global(operator.eq)
class CmpOpEqArray(AbstractTemplate):
    """Typing of ``array == array`` for identical array types: result is a
    boolean array of the same shape/layout."""

    def generic(self, args, kws):
        assert not kws
        lhs, rhs = args
        if isinstance(lhs, types.Array) and lhs == rhs:
            return signature(lhs.copy(dtype=types.boolean), lhs, rhs)
lib/python3.10/site-packages/numba/core/typing/bufproto.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Typing support for the buffer protocol (PEP 3118).
3
+ """
4
+
5
+ import array
6
+
7
+ from numba.core import types, config
8
+ from numba.core.errors import NumbaValueError
9
+
10
+
11
# PEP 3118 format characters for the integer types; upper-case variants
# are unsigned (see decode_pep3118_format below).
_pep3118_int_types = set('bBhHiIlLqQnN')

# Map non-integer PEP 3118 format strings to Numba scalar types.
if config.USE_LEGACY_TYPE_SYSTEM: # Old type system
    _pep3118_scalar_map = {
        'f': types.float32,
        'd': types.float64,
        'Zf': types.complex64,
        'Zd': types.complex128,
    }
else: # New type system
    _pep3118_scalar_map = {
        # TODO: FIXME We need to modify the following Map to use Python Types.
        # However currently here's nothing in Python types that maps
        # to a float32 or a complex64
        # 'f': types.np_float32,
        'd': types.py_float, # 64-bit float
        # 'Zf': types.np_complex64,
        'Zd': types.py_complex, # 128-bit complex
    }

# Special-cased buffer-providing Python types; anything else falls back to
# the generic types.Buffer in get_type_class().
_type_map = {
    bytearray: types.ByteArray,
    array.array: types.PyArray,
}

_type_map[memoryview] = types.MemoryView
_type_map[bytes] = types.Bytes
38
+
39
+
40
def decode_pep3118_format(fmt, itemsize):
    """
    Return the Numba type for an item with format string *fmt* and size
    *itemsize* (in bytes).
    """
    # XXX reuse _dtype_from_pep3118() from np.core._internal?
    if fmt in _pep3118_int_types:
        # Integer width comes from the item size; upper-case format
        # characters denote the unsigned variants.
        name = 'int%d' % (itemsize * 8,)
        if fmt.isupper():
            name = 'u' + name
        return types.Integer(name)
    # For the hard-coded types above, consider "=" the same as "@"
    # (the default). This is because Numpy sometimes adds "="
    # in front of the PEP 3118 format string.
    scalar = _pep3118_scalar_map.get(fmt.lstrip('='))
    if scalar is None:
        raise NumbaValueError("unsupported PEP 3118 format %r" % (fmt,))
    return scalar
59
+
60
+
61
def get_type_class(typ):
    """
    Get the Numba type class for buffer-compatible Python *typ*.
    """
    # Special-cased types first; generic Buffer for everything else.
    return _type_map.get(typ, types.Buffer)
71
+
72
+
73
def infer_layout(val):
    """
    Infer layout of the given memoryview *val*: 'C' for C-contiguous,
    'F' for Fortran-contiguous, otherwise 'A' (any).
    """
    if val.c_contiguous:
        return 'C'
    if val.f_contiguous:
        return 'F'
    return 'A'
lib/python3.10/site-packages/numba/core/typing/context.py ADDED
@@ -0,0 +1,741 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from collections import defaultdict
2
+ from collections.abc import Sequence
3
+ import types as pytypes
4
+ import weakref
5
+ import threading
6
+ import contextlib
7
+ import operator
8
+
9
+ from numba.core import types, errors
10
+ from numba.core.typeconv import Conversion, rules
11
+ from numba.core.typing import templates
12
+ from numba.core.utils import order_by_target_specificity
13
+ from .typeof import typeof, Purpose
14
+
15
+ from numba.core import utils
16
+
17
+
18
class Rating(object):
    """Counters rating the quality of an overload match: promotions,
    safe conversions, and unsafe conversions (worse)."""
    __slots__ = 'promote', 'safe_convert', "unsafe_convert"

    def __init__(self):
        self.promote = 0
        self.safe_convert = 0
        self.unsafe_convert = 0

    def astuple(self):
        """Returns a tuple suitable for comparing with the worse situation
        start first.
        """
        return (self.unsafe_convert, self.safe_convert, self.promote)

    def __add__(self, other):
        if type(self) is not type(other):
            return NotImplemented
        combined = Rating()
        # Sum each counter pairwise.
        for field in Rating.__slots__:
            setattr(combined, field,
                    getattr(self, field) + getattr(other, field))
        return combined
40
+
41
+
42
class CallStack(Sequence):
    """
    A compile-time call stack.

    Iterating the stack (via the Sequence protocol) yields frames from the
    top (most recent) downwards, because __getitem__ reverses the index.
    """

    def __init__(self):
        self._stack = []
        # Re-entrant lock: register() may nest within the same thread.
        self._lock = threading.RLock()

    def __getitem__(self, index):
        """
        Returns item in the stack where index=0 is the top and index=1 is
        the second item from the top.
        """
        return self._stack[len(self) - index - 1]

    def __len__(self):
        return len(self._stack)

    @contextlib.contextmanager
    def register(self, target, typeinfer, func_id, args):
        """Push a CallFrame for the duration of the ``with`` block."""
        # guard compiling the same function with the same signature
        if self.match(func_id.func, args):
            msg = "compiler re-entrant to the same function signature"
            raise errors.NumbaRuntimeError(msg)
        self._lock.acquire()
        self._stack.append(CallFrame(target, typeinfer, func_id, args))
        try:
            yield
        finally:
            # Pop before releasing so the frame never outlives the lock.
            self._stack.pop()
            self._lock.release()

    def finditer(self, py_func):
        """
        Yields frame that matches the function object starting from the top
        of stack.
        """
        for frame in self:
            if frame.func_id.func is py_func:
                yield frame

    def findfirst(self, py_func):
        """
        Returns the first result from `.finditer(py_func)`; or None if no match.
        """
        try:
            return next(self.finditer(py_func))
        except StopIteration:
            return

    def match(self, py_func, args):
        """
        Returns first function that matches *py_func* and the arguments types in
        *args*; or, None if no match.
        """
        for frame in self.finditer(py_func):
            if frame.args == args:
                return frame
101
+
102
+
103
class CallFrame(object):
    """
    A compile-time call frame: one entry on the CallStack, identifying the
    function being typed and the argument types it was called with.
    """

    def __init__(self, target, typeinfer, func_id, args):
        self.typeinfer = typeinfer
        self.func_id = func_id
        self.args = args
        self.target = target
        # Distinct return types inferred so far for this call.
        self._inferred_retty = set()

    def __repr__(self):
        return "CallFrame({}, {})".format(self.func_id, self.args)

    def add_return_type(self, return_type):
        """Record *return_type*; raise `TypingError` once too many distinct
        return types have been inferred (non-converging recursion).
        """
        # The maximum limit is picked arbitrarily.
        # Don't think that this needs to be user configurable.
        limit = 16
        self._inferred_retty.add(return_type)
        if len(self._inferred_retty) >= limit:
            raise errors.TypingError(
                "Return type of recursive function does not converge")
128
+
129
+
130
+ class BaseContext(object):
131
+ """A typing context for storing function typing constrain template.
132
+ """
133
+
134
+ def __init__(self):
135
+ # A list of installed registries
136
+ self._registries = {}
137
+ # Typing declarations extracted from the registries or other sources
138
+ self._functions = defaultdict(list)
139
+ self._attributes = defaultdict(list)
140
+ self._globals = utils.UniqueDict()
141
+ self.tm = rules.default_type_manager
142
+ self.callstack = CallStack()
143
+
144
+ # Initialize
145
+ self.init()
146
+
147
+ def init(self):
148
+ """
149
+ Initialize the typing context. Can be overridden by subclasses.
150
+ """
151
+
152
+ def refresh(self):
153
+ """
154
+ Refresh context with new declarations from known registries.
155
+ Useful for third-party extensions.
156
+ """
157
+ self.load_additional_registries()
158
+ # Some extensions may have augmented the builtin registry
159
+ self._load_builtins()
160
+
161
+ def explain_function_type(self, func):
162
+ """
163
+ Returns a string description of the type of a function
164
+ """
165
+ desc = []
166
+ defns = []
167
+ param = False
168
+ if isinstance(func, types.Callable):
169
+ sigs, param = func.get_call_signatures()
170
+ defns.extend(sigs)
171
+
172
+ elif func in self._functions:
173
+ for tpl in self._functions[func]:
174
+ param = param or hasattr(tpl, 'generic')
175
+ defns.extend(getattr(tpl, 'cases', []))
176
+
177
+ else:
178
+ msg = "No type info available for {func!r} as a callable."
179
+ desc.append(msg.format(func=func))
180
+
181
+ if defns:
182
+ desc = ['Known signatures:']
183
+ for sig in defns:
184
+ desc.append(' * {0}'.format(sig))
185
+
186
+ return '\n'.join(desc)
187
+
188
+ def resolve_function_type(self, func, args, kws):
189
+ """
190
+ Resolve function type *func* for argument types *args* and *kws*.
191
+ A signature is returned.
192
+ """
193
+ # Prefer user definition first
194
+ try:
195
+ res = self._resolve_user_function_type(func, args, kws)
196
+ except errors.TypingError as e:
197
+ # Capture any typing error
198
+ last_exception = e
199
+ res = None
200
+ else:
201
+ last_exception = None
202
+
203
+ # Return early we know there's a working user function
204
+ if res is not None:
205
+ return res
206
+
207
+ # Check builtin functions
208
+ res = self._resolve_builtin_function_type(func, args, kws)
209
+
210
+ # Re-raise last_exception if no function type has been found
211
+ if res is None and last_exception is not None:
212
+ raise last_exception
213
+
214
+ return res
215
+
216
+ def _resolve_builtin_function_type(self, func, args, kws):
217
+ # NOTE: we should reduce usage of this
218
+ if func in self._functions:
219
+ # Note: Duplicating code with types.Function.get_call_type().
220
+ # *defns* are CallTemplates.
221
+ defns = self._functions[func]
222
+ for defn in defns:
223
+ for support_literals in [True, False]:
224
+ if support_literals:
225
+ res = defn.apply(args, kws)
226
+ else:
227
+ fixedargs = [types.unliteral(a) for a in args]
228
+ res = defn.apply(fixedargs, kws)
229
+ if res is not None:
230
+ return res
231
+
232
+ def _resolve_user_function_type(self, func, args, kws, literals=None):
233
+ # It's not a known function type, perhaps it's a global?
234
+ functy = self._lookup_global(func)
235
+ if functy is not None:
236
+ func = functy
237
+
238
+ if isinstance(func, types.Type):
239
+ # If it's a type, it may support a __call__ method
240
+ func_type = self.resolve_getattr(func, "__call__")
241
+ if func_type is not None:
242
+ # The function has a __call__ method, type its call.
243
+ return self.resolve_function_type(func_type, args, kws)
244
+
245
+ if isinstance(func, types.Callable):
246
+ # XXX fold this into the __call__ attribute logic?
247
+ return func.get_call_type(self, args, kws)
248
+
249
+ def _get_attribute_templates(self, typ):
250
+ """
251
+ Get matching AttributeTemplates for the Numba type.
252
+ """
253
+ if typ in self._attributes:
254
+ for attrinfo in self._attributes[typ]:
255
+ yield attrinfo
256
+ else:
257
+ for cls in type(typ).__mro__:
258
+ if cls in self._attributes:
259
+ for attrinfo in self._attributes[cls]:
260
+ yield attrinfo
261
+
262
+ def resolve_getattr(self, typ, attr):
263
+ """
264
+ Resolve getting the attribute *attr* (a string) on the Numba type.
265
+ The attribute's type is returned, or None if resolution failed.
266
+ """
267
+ def core(typ):
268
+ out = self.find_matching_getattr_template(typ, attr)
269
+ if out:
270
+ return out['return_type']
271
+
272
+ out = core(typ)
273
+ if out is not None:
274
+ return out
275
+
276
+ # Try again without literals
277
+ out = core(types.unliteral(typ))
278
+ if out is not None:
279
+ return out
280
+
281
+ if isinstance(typ, types.Module):
282
+ attrty = self.resolve_module_constants(typ, attr)
283
+ if attrty is not None:
284
+ return attrty
285
+
286
+ def find_matching_getattr_template(self, typ, attr):
287
+
288
+ templates = list(self._get_attribute_templates(typ))
289
+
290
+ # get the order in which to try templates
291
+ from numba.core.target_extension import get_local_target # circular
292
+ target_hw = get_local_target(self)
293
+ order = order_by_target_specificity(target_hw, templates, fnkey=attr)
294
+
295
+ for template in order:
296
+ return_type = template.resolve(typ, attr)
297
+ if return_type is not None:
298
+ return {
299
+ 'template': template,
300
+ 'return_type': return_type,
301
+ }
302
+
303
+ def resolve_setattr(self, target, attr, value):
304
+ """
305
+ Resolve setting the attribute *attr* (a string) on the *target* type
306
+ to the given *value* type.
307
+ A function signature is returned, or None if resolution failed.
308
+ """
309
+ for attrinfo in self._get_attribute_templates(target):
310
+ expectedty = attrinfo.resolve(target, attr)
311
+ # NOTE: convertibility from *value* to *expectedty* is left to
312
+ # the caller.
313
+ if expectedty is not None:
314
+ return templates.signature(types.void, target, expectedty)
315
+
316
+ def resolve_static_getitem(self, value, index):
317
+ assert not isinstance(index, types.Type), index
318
+ args = value, index
319
+ kws = ()
320
+ return self.resolve_function_type("static_getitem", args, kws)
321
+
322
+ def resolve_static_setitem(self, target, index, value):
323
+ assert not isinstance(index, types.Type), index
324
+ args = target, index, value
325
+ kws = {}
326
+ return self.resolve_function_type("static_setitem", args, kws)
327
+
328
+ def resolve_setitem(self, target, index, value):
329
+ assert isinstance(index, types.Type), index
330
+ fnty = self.resolve_value_type(operator.setitem)
331
+ sig = fnty.get_call_type(self, (target, index, value), {})
332
+ return sig
333
+
334
+ def resolve_delitem(self, target, index):
335
+ args = target, index
336
+ kws = {}
337
+ fnty = self.resolve_value_type(operator.delitem)
338
+ sig = fnty.get_call_type(self, args, kws)
339
+ return sig
340
+
341
+ def resolve_module_constants(self, typ, attr):
342
+ """
343
+ Resolve module-level global constants.
344
+ Return None or the attribute type
345
+ """
346
+ assert isinstance(typ, types.Module)
347
+ attrval = getattr(typ.pymod, attr)
348
+ try:
349
+ return self.resolve_value_type(attrval)
350
+ except ValueError:
351
+ pass
352
+
353
+ def resolve_value_type(self, val):
354
+ """
355
+ Return the numba type of a Python value that is being used
356
+ as a runtime constant.
357
+ ValueError is raised for unsupported types.
358
+ """
359
+ try:
360
+ ty = typeof(val, Purpose.constant)
361
+ except ValueError as e:
362
+ # Make sure the exception doesn't hold a reference to the user
363
+ # value.
364
+ typeof_exc = utils.erase_traceback(e)
365
+ else:
366
+ return ty
367
+
368
+ if isinstance(val, types.ExternalFunction):
369
+ return val
370
+
371
+ # Try to look up target specific typing information
372
+ ty = self._get_global_type(val)
373
+ if ty is not None:
374
+ return ty
375
+
376
+ raise typeof_exc
377
+
378
+ def resolve_value_type_prefer_literal(self, value):
379
+ """Resolve value type and prefer Literal types whenever possible.
380
+ """
381
+ lit = types.maybe_literal(value)
382
+ if lit is None:
383
+ return self.resolve_value_type(value)
384
+ else:
385
+ return lit
386
+
387
+ def _get_global_type(self, gv):
388
+ ty = self._lookup_global(gv)
389
+ if ty is not None:
390
+ return ty
391
+ if isinstance(gv, pytypes.ModuleType):
392
+ return types.Module(gv)
393
+
394
+ def _load_builtins(self):
395
+ # Initialize declarations
396
+ from numba.core.typing import builtins, arraydecl, npdatetime # noqa: F401, E501
397
+ from numba.core.typing import ctypes_utils, bufproto # noqa: F401, E501
398
+ from numba.core.unsafe import eh # noqa: F401
399
+
400
+ self.install_registry(templates.builtin_registry)
401
+
402
+ def load_additional_registries(self):
403
+ """
404
+ Load target-specific registries. Can be overridden by subclasses.
405
+ """
406
+
407
+ def install_registry(self, registry):
408
+ """
409
+ Install a *registry* (a templates.Registry instance) of function,
410
+ attribute and global declarations.
411
+ """
412
+ try:
413
+ loader = self._registries[registry]
414
+ except KeyError:
415
+ loader = templates.RegistryLoader(registry)
416
+ self._registries[registry] = loader
417
+
418
+ from numba.core.target_extension import (get_local_target,
419
+ resolve_target_str)
420
+ current_target = get_local_target(self)
421
+
422
+ def is_for_this_target(ftcls):
423
+ metadata = getattr(ftcls, 'metadata', None)
424
+ if metadata is None:
425
+ return True
426
+
427
+ target_str = metadata.get('target')
428
+ if target_str is None:
429
+ return True
430
+
431
+ # There may be pending registrations for nonexistent targets.
432
+ # Ideally it would be impossible to leave a registration pending
433
+ # for an invalid target, but in practice this is exceedingly
434
+ # difficult to guard against - many things are registered at import
435
+ # time, and eagerly reporting an error when registering for invalid
436
+ # targets would require that all target registration code is
437
+ # executed prior to all typing registrations during the import
438
+ # process; attempting to enforce this would impose constraints on
439
+ # execution order during import that would be very difficult to
440
+ # resolve and maintain in the presence of typical code maintenance.
441
+ # Furthermore, these constraints would be imposed not only on
442
+ # Numba internals, but also on its dependents.
443
+ #
444
+ # Instead of that enforcement, we simply catch any occurrences of
445
+ # registrations for targets that don't exist, and report that
446
+ # they're not for this target. They will then not be encountered
447
+ # again during future typing context refreshes (because the
448
+ # loader's new registrations are a stream_list that doesn't yield
449
+ # previously-yielded items).
450
+ try:
451
+ ft_target = resolve_target_str(target_str)
452
+ except errors.NonexistentTargetError:
453
+ return False
454
+
455
+ return current_target.inherits_from(ft_target)
456
+
457
+ for ftcls in loader.new_registrations('functions'):
458
+ if not is_for_this_target(ftcls):
459
+ continue
460
+ self.insert_function(ftcls(self))
461
+ for ftcls in loader.new_registrations('attributes'):
462
+ if not is_for_this_target(ftcls):
463
+ continue
464
+ self.insert_attributes(ftcls(self))
465
+ for gv, gty in loader.new_registrations('globals'):
466
+ existing = self._lookup_global(gv)
467
+ if existing is None:
468
+ self.insert_global(gv, gty)
469
+ else:
470
+ # A type was already inserted, see if we can add to it
471
+ newty = existing.augment(gty)
472
+ if newty is None:
473
+ raise TypeError("cannot augment %s with %s"
474
+ % (existing, gty))
475
+ self._remove_global(gv)
476
+ self._insert_global(gv, newty)
477
+
478
+ def _lookup_global(self, gv):
479
+ """
480
+ Look up the registered type for global value *gv*.
481
+ """
482
+ try:
483
+ gv = weakref.ref(gv)
484
+ except TypeError:
485
+ pass
486
+ try:
487
+ return self._globals.get(gv, None)
488
+ except TypeError:
489
+ # Unhashable type
490
+ return None
491
+
492
+ def _insert_global(self, gv, gty):
493
+ """
494
+ Register type *gty* for value *gv*. Only a weak reference
495
+ to *gv* is kept, if possible.
496
+ """
497
+ def on_disposal(wr, pop=self._globals.pop):
498
+ # pop() is pre-looked up to avoid a crash late at shutdown on 3.5
499
+ # (https://bugs.python.org/issue25217)
500
+ pop(wr)
501
+ try:
502
+ gv = weakref.ref(gv, on_disposal)
503
+ except TypeError:
504
+ pass
505
+ self._globals[gv] = gty
506
+
507
+ def _remove_global(self, gv):
508
+ """
509
+ Remove the registered type for global value *gv*.
510
+ """
511
+ try:
512
+ gv = weakref.ref(gv)
513
+ except TypeError:
514
+ pass
515
+ del self._globals[gv]
516
+
517
+ def insert_global(self, gv, gty):
518
+ self._insert_global(gv, gty)
519
+
520
+ def insert_attributes(self, at):
521
+ key = at.key
522
+ self._attributes[key].append(at)
523
+
524
+ def insert_function(self, ft):
525
+ key = ft.key
526
+ self._functions[key].append(ft)
527
+
528
+ def insert_user_function(self, fn, ft):
529
+ """Insert a user function.
530
+
531
+ Args
532
+ ----
533
+ - fn:
534
+ object used as callee
535
+ - ft:
536
+ function template
537
+ """
538
+ self._insert_global(fn, types.Function(ft))
539
+
540
+ def can_convert(self, fromty, toty):
541
+ """
542
+ Check whether conversion is possible from *fromty* to *toty*.
543
+ If successful, return a numba.typeconv.Conversion instance;
544
+ otherwise None is returned.
545
+ """
546
+ if fromty == toty:
547
+ return Conversion.exact
548
+ else:
549
+ # First check with the type manager (some rules are registered
550
+ # at startup there, see numba.typeconv.rules)
551
+ conv = self.tm.check_compatible(fromty, toty)
552
+ if conv is not None:
553
+ return conv
554
+
555
+ # Fall back on type-specific rules
556
+ forward = fromty.can_convert_to(self, toty)
557
+ backward = toty.can_convert_from(self, fromty)
558
+ if backward is None:
559
+ return forward
560
+ elif forward is None:
561
+ return backward
562
+ else:
563
+ return min(forward, backward)
564
+
565
+ def _rate_arguments(self, actualargs, formalargs, unsafe_casting=True,
566
+ exact_match_required=False):
567
+ """
568
+ Rate the actual arguments for compatibility against the formal
569
+ arguments. A Rating instance is returned, or None if incompatible.
570
+ """
571
+ if len(actualargs) != len(formalargs):
572
+ return None
573
+ rate = Rating()
574
+ for actual, formal in zip(actualargs, formalargs):
575
+ conv = self.can_convert(actual, formal)
576
+ if conv is None:
577
+ return None
578
+ elif not unsafe_casting and conv >= Conversion.unsafe:
579
+ return None
580
+ elif exact_match_required and conv != Conversion.exact:
581
+ return None
582
+
583
+ if conv == Conversion.promote:
584
+ rate.promote += 1
585
+ elif conv == Conversion.safe:
586
+ rate.safe_convert += 1
587
+ elif conv == Conversion.unsafe:
588
+ rate.unsafe_convert += 1
589
+ elif conv == Conversion.exact:
590
+ pass
591
+ else:
592
+ raise AssertionError("unreachable", conv)
593
+
594
+ return rate
595
+
596
+ def install_possible_conversions(self, actualargs, formalargs):
597
+ """
598
+ Install possible conversions from the actual argument types to
599
+ the formal argument types in the C++ type manager.
600
+ Return True if all arguments can be converted.
601
+ """
602
+ if len(actualargs) != len(formalargs):
603
+ return False
604
+ for actual, formal in zip(actualargs, formalargs):
605
+ if self.tm.check_compatible(actual, formal) is not None:
606
+ # This conversion is already known
607
+ continue
608
+ conv = self.can_convert(actual, formal)
609
+ if conv is None:
610
+ return False
611
+ assert conv is not Conversion.exact
612
+ self.tm.set_compatible(actual, formal, conv)
613
+ return True
614
+
615
+ def resolve_overload(self, key, cases, args, kws,
616
+ allow_ambiguous=True, unsafe_casting=True,
617
+ exact_match_required=False):
618
+ """
619
+ Given actual *args* and *kws*, find the best matching
620
+ signature in *cases*, or None if none matches.
621
+ *key* is used for error reporting purposes.
622
+ If *allow_ambiguous* is False, a tie in the best matches
623
+ will raise an error.
624
+ If *unsafe_casting* is False, unsafe casting is forbidden.
625
+ """
626
+ assert not kws, "Keyword arguments are not supported, yet"
627
+ options = {
628
+ 'unsafe_casting': unsafe_casting,
629
+ 'exact_match_required': exact_match_required,
630
+ }
631
+ # Rate each case
632
+ candidates = []
633
+ for case in cases:
634
+ if len(args) == len(case.args):
635
+ rating = self._rate_arguments(args, case.args, **options)
636
+ if rating is not None:
637
+ candidates.append((rating.astuple(), case))
638
+
639
+ # Find the best case
640
+ candidates.sort(key=lambda i: i[0])
641
+ if candidates:
642
+ best_rate, best = candidates[0]
643
+ if not allow_ambiguous:
644
+ # Find whether there is a tie and if so, raise an error
645
+ tied = []
646
+ for rate, case in candidates:
647
+ if rate != best_rate:
648
+ break
649
+ tied.append(case)
650
+ if len(tied) > 1:
651
+ args = (key, args, '\n'.join(map(str, tied)))
652
+ msg = "Ambiguous overloading for %s %s:\n%s" % args
653
+ raise TypeError(msg)
654
+ # Simply return the best matching candidate in order.
655
+ # If there is a tie, since list.sort() is stable, the first case
656
+ # in the original order is returned.
657
+ # (this can happen if e.g. a function template exposes
658
+ # (int32, int32) -> int32 and (int64, int64) -> int64,
659
+ # and you call it with (int16, int16) arguments)
660
+ return best
661
+
662
+ def unify_types(self, *typelist):
663
+ # Sort the type list according to bit width before doing
664
+ # pairwise unification (with thanks to aterrel).
665
+ def keyfunc(obj):
666
+ """Uses bitwidth to order numeric-types.
667
+ Fallback to stable, deterministic sort.
668
+ """
669
+ return getattr(obj, 'bitwidth', 0)
670
+ typelist = sorted(typelist, key=keyfunc)
671
+ unified = typelist[0]
672
+ for tp in typelist[1:]:
673
+ unified = self.unify_pairs(unified, tp)
674
+ if unified is None:
675
+ break
676
+ return unified
677
+
678
+ def unify_pairs(self, first, second):
679
+ """
680
+ Try to unify the two given types. A third type is returned,
681
+ or None in case of failure.
682
+ """
683
+ if first == second:
684
+ return first
685
+
686
+ if first is types.undefined:
687
+ return second
688
+ elif second is types.undefined:
689
+ return first
690
+
691
+ # Types with special unification rules
692
+ unified = first.unify(self, second)
693
+ if unified is not None:
694
+ return unified
695
+
696
+ unified = second.unify(self, first)
697
+ if unified is not None:
698
+ return unified
699
+
700
+ # Other types with simple conversion rules
701
+ conv = self.can_convert(fromty=first, toty=second)
702
+ if conv is not None and conv <= Conversion.safe:
703
+ # Can convert from first to second
704
+ return second
705
+
706
+ conv = self.can_convert(fromty=second, toty=first)
707
+ if conv is not None and conv <= Conversion.safe:
708
+ # Can convert from second to first
709
+ return first
710
+
711
+ if isinstance(first, types.Literal) or \
712
+ isinstance(second, types.Literal):
713
+ first = types.unliteral(first)
714
+ second = types.unliteral(second)
715
+ return self.unify_pairs(first, second)
716
+
717
+ # Cannot unify
718
+ return None
719
+
720
+
721
class Context(BaseContext):
    """Typing context for the CPU target."""

    def load_additional_registries(self):
        """Install the CPU-specific typing registries."""
        from . import (
            cffi_utils,
            cmathdecl,
            enumdecl,
            listdecl,
            mathdecl,
            npydecl,
            setdecl,
            dictdecl,
        )
        # Installation order matches the import list above.
        for decl_module in (cffi_utils, cmathdecl, enumdecl, listdecl,
                            mathdecl, npydecl, setdecl, dictdecl):
            self.install_registry(decl_module.registry)
lib/python3.10/site-packages/numba/core/typing/mathdecl.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import sys
from numba.core.utils import _RedirectSubpackage
from numba.core import config

# Redirect this module to the legacy or the new type-system
# implementation, depending on configuration.  The two branches differ
# only in the target module name.
sys.modules[__name__] = _RedirectSubpackage(
    locals(),
    "numba.core.typing.old_mathdecl" if config.USE_LEGACY_TYPE_SYSTEM
    else "numba.core.typing.new_mathdecl",
)
lib/python3.10/site-packages/numba/core/typing/new_cmathdecl.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import cmath

from numba.core import types, utils
from numba.core.typing.templates import (AbstractTemplate, ConcreteTemplate,
                                         signature, Registry)

registry = Registry()
infer_global = registry.register_global

# TODO: support non-complex arguments (floats and ints)

# TODO: New Type System
# These functions are part of the Python standard library and (without
# checking) probably accept anything "number"-like, i.e. anything with a
# __float__, __int__, or __index__.  This needs fixing in the new type
# system, hence the empty case lists below.


@infer_global(cmath.acos)
@infer_global(cmath.asin)
@infer_global(cmath.asinh)
@infer_global(cmath.atan)
@infer_global(cmath.atanh)
@infer_global(cmath.cos)
@infer_global(cmath.exp)
@infer_global(cmath.sin)
@infer_global(cmath.sqrt)
@infer_global(cmath.tan)
class CMath_unary(ConcreteTemplate):
    # Unary complex -> complex functions; no signatures registered yet.
    cases = []


@infer_global(cmath.isinf)
@infer_global(cmath.isnan)
class CMath_predicate(ConcreteTemplate):
    # Complex -> bool predicates; no signatures registered yet.
    cases = []


@infer_global(cmath.isfinite)
class CMath_isfinite(CMath_predicate):
    pass


@infer_global(cmath.log)
class Cmath_log(ConcreteTemplate):
    # unary cmath.log()
    cases = []
    # binary cmath.log()
    cases += []
lib/python3.10/site-packages/numba/core/typing/new_mathdecl.py ADDED
@@ -0,0 +1,107 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import math
from numba.core import types, utils
from numba.core.typing.templates import (AttributeTemplate, ConcreteTemplate,
                                         signature, Registry)

# TODO: New Type System
# These functions are part of the Python standard library and (without
# checking) probably accept anything "number"-like, i.e. anything with a
# __float__, __int__, or __index__.  This needs fixing in the new type
# system, hence the empty case lists below.


registry = Registry()
infer_global = registry.register_global


@infer_global(math.exp)
@infer_global(math.expm1)
@infer_global(math.fabs)
@infer_global(math.sqrt)
@infer_global(math.log)
@infer_global(math.log1p)
@infer_global(math.log10)
@infer_global(math.log2)
@infer_global(math.sin)
@infer_global(math.cos)
@infer_global(math.tan)
@infer_global(math.sinh)
@infer_global(math.cosh)
@infer_global(math.tanh)
@infer_global(math.asin)
@infer_global(math.acos)
@infer_global(math.atan)
@infer_global(math.asinh)
@infer_global(math.acosh)
@infer_global(math.atanh)
@infer_global(math.degrees)
@infer_global(math.radians)
@infer_global(math.erf)
@infer_global(math.erfc)
@infer_global(math.gamma)
@infer_global(math.lgamma)
class Math_unary(ConcreteTemplate):
    # Unary real -> real functions; no signatures registered yet.
    cases = []


@infer_global(math.atan2)
class Math_atan2(ConcreteTemplate):
    cases = []


@infer_global(math.trunc)
class Math_converter(ConcreteTemplate):
    cases = []


@infer_global(math.floor)
@infer_global(math.ceil)
class Math_floor_ceil(Math_converter):
    pass


@infer_global(math.copysign)
class Math_copysign(ConcreteTemplate):
    cases = []


@infer_global(math.hypot)
class Math_hypot(ConcreteTemplate):
    cases = []


@infer_global(math.nextafter)
class Math_nextafter(ConcreteTemplate):
    cases = []


@infer_global(math.isinf)
@infer_global(math.isnan)
class Math_predicate(ConcreteTemplate):
    cases = []


@infer_global(math.isfinite)
class Math_isfinite(Math_predicate):
    pass


@infer_global(math.pow)
class Math_pow(ConcreteTemplate):
    cases = []


@infer_global(math.gcd)
class Math_gcd(ConcreteTemplate):
    cases = []


@infer_global(math.frexp)
class Math_frexp(ConcreteTemplate):
    cases = []


@infer_global(math.ldexp)
class Math_ldexp(ConcreteTemplate):
    cases = []
lib/python3.10/site-packages/numba/core/typing/templates.py ADDED
@@ -0,0 +1,1337 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Define typing templates
3
+ """
4
+
5
+ from abc import ABC, abstractmethod
6
+ import functools
7
+ import sys
8
+ import inspect
9
+ import os.path
10
+ from collections import namedtuple
11
+ from collections.abc import Sequence
12
+ from types import MethodType, FunctionType, MappingProxyType
13
+
14
+ import numba
15
+ from numba.core import types, utils, targetconfig
16
+ from numba.core.errors import (
17
+ TypingError,
18
+ InternalError,
19
+ )
20
+ from numba.core.cpu_options import InlineOptions
21
+
22
+ # info store for inliner callback functions e.g. cost model
23
+ _inline_info = namedtuple('inline_info',
24
+ 'func_ir typemap calltypes signature')
25
+
26
+
27
class Signature(object):
    """
    The signature of a function call or operation, i.e. its argument types
    and return type.
    """

    # XXX Perhaps the signature should be a BoundArguments, instead
    # of separate args and pysig...
    __slots__ = '_return_type', '_args', '_recvr', '_pysig'

    def __init__(self, return_type, args, recvr, pysig=None):
        # Normalize list arguments to an immutable tuple.
        if isinstance(args, list):
            args = tuple(args)
        self._return_type = return_type
        self._args = args
        self._recvr = recvr
        self._pysig = pysig

    @property
    def return_type(self):
        # Type of the call's result.
        return self._return_type

    @property
    def args(self):
        # Tuple of argument types.
        return self._args

    @property
    def recvr(self):
        # Receiver ("self") type for bound-method signatures, else None.
        return self._recvr

    @property
    def pysig(self):
        # Optional Python-level signature (inspect.Signature-like).
        return self._pysig

    def replace(self, **kwargs):
        """Copy and replace the given attributes provided as keyword arguments.
        Returns an updated copy.
        """
        state = dict(return_type=self.return_type,
                     args=self.args,
                     recvr=self.recvr,
                     pysig=self.pysig)
        state.update(kwargs)
        return Signature(**state)

    def __getstate__(self):
        """
        Needed because of __slots__.
        """
        return self._return_type, self._args, self._recvr, self._pysig

    def __setstate__(self, state):
        """
        Needed because of __slots__.
        """
        self._return_type, self._args, self._recvr, self._pysig = state

    def __hash__(self):
        return hash((self.args, self.return_type))

    def __eq__(self, other):
        if isinstance(other, Signature):
            return (self.args == other.args
                    and self.return_type == other.return_type
                    and self.recvr == other.recvr
                    and self.pysig == other.pysig)

    def __ne__(self, other):
        return not (self == other)

    def __repr__(self):
        return "%s -> %s" % (self.args, self.return_type)

    @property
    def is_method(self):
        """
        Whether this signature represents a bound method or a regular
        function.
        """
        return self.recvr is not None

    def as_method(self):
        """
        Convert this signature to a bound method signature.
        """
        if self.recvr is not None:
            return self
        sig = signature(self.return_type, *self.args[1:],
                        recvr=self.args[0])

        # Drop the first (receiver) parameter from the Python signature.
        params = list(self.pysig.parameters.values())[1:]
        return sig.replace(
            pysig=utils.pySignature(
                parameters=params,
                return_annotation=self.pysig.return_annotation,
            ),
        )

    def as_function(self):
        """
        Convert this signature to a regular function signature.
        """
        if self.recvr is None:
            return self
        return signature(self.return_type, *((self.recvr,) + self.args))

    def as_type(self):
        """
        Convert this signature to a first-class function type.
        """
        return types.FunctionType(self)

    def __unliteral__(self):
        return signature(types.unliteral(self.return_type),
                         *map(types.unliteral, self.args))

    def dump(self, tab=''):
        """Print a human-readable description of this signature."""
        code = self.as_type()._code
        print(f'{tab}DUMP {type(self).__name__} [type code: {code}]')
        print(f'{tab} Argument types:')
        for argty in self.args:
            argty.dump(tab=tab + ' | ')
        print(f'{tab} Return type:')
        self.return_type.dump(tab=tab + ' | ')
        print(f'{tab}END DUMP')

    def is_precise(self):
        """True if every argument type and the return type are precise."""
        for argty in self.args:
            if not argty.is_precise():
                return False
        return self.return_type.is_precise()
161
+
162
+
163
def make_concrete_template(name, key, signatures):
    """Create a new ConcreteTemplate subclass called *name* for *key*,
    with *signatures* as its cases."""
    class_attrs = dict(key=key, cases=list(signatures))
    return type(name, (ConcreteTemplate,), class_attrs)
167
+
168
+
169
def make_callable_template(key, typer, recvr=None):
    """
    Create a callable template with the given key and typer function.
    """
    def generic(self):
        return typer

    cls_name = "%s_CallableTemplate" % (key,)
    cls_dict = dict(key=key, generic=generic, recvr=recvr)
    return type(cls_name, (CallableTemplate,), cls_dict)
180
+
181
+
182
def signature(return_type, *args, **kws):
    """Build a Signature from *return_type* and argument types; the only
    accepted keyword is 'recvr' (receiver type for bound methods)."""
    recvr = kws.pop('recvr', None)
    assert not kws
    return Signature(return_type, args, recvr=recvr)
186
+
187
+
188
def fold_arguments(pysig, args, kws, normal_handler, default_handler,
                   stararg_handler):
    """
    Given the signature *pysig*, explicit *args* and *kws*, resolve
    omitted arguments and keyword arguments. A tuple of positional
    arguments is returned.
    Various handlers allow to process arguments:
    - normal_handler(index, param, value) is called for normal arguments
    - default_handler(index, param, default) is called for omitted arguments
    - stararg_handler(index, param, values) is called for a "*args" argument
    """
    if isinstance(kws, Sequence):
        # Normalize sequence-of-pairs kws to a dict
        kws = dict(kws)

    # Keyword-only argument values arrive appended to the positional
    # *args*; peel them off and rebind them by name before pysig.bind().
    params = pysig.parameters
    kwonly = []
    for name, p in params.items():
        if p.kind == p.KEYWORD_ONLY:
            kwonly.append(name)

    if kwonly:
        bind_args = args[:-len(kwonly)]
    else:
        bind_args = args
    bind_kws = kws.copy()
    if kwonly:
        for idx, n in enumerate(kwonly):
            # BUG FIX: the keyword-only values are the *last* len(kwonly)
            # entries of *args*, so they must be indexed from the end.
            # The previous expression, args[len(kwonly) + idx], indexed
            # from the front and picked wrong values (or raised
            # IndexError) whenever the number of positional arguments
            # differed from the number of keyword-only ones.
            bind_kws[n] = args[-len(kwonly) + idx]

    # now bind
    try:
        ba = pysig.bind(*bind_args, **bind_kws)
    except TypeError as e:
        # The binding attempt can raise if the args don't match up, this needs
        # to be converted to a TypingError so that e.g. partial type inference
        # doesn't just halt.
        msg = (f"Cannot bind 'args={bind_args} kws={bind_kws}' to "
               f"signature '{pysig}' due to \"{type(e).__name__}: {e}\".")
        raise TypingError(msg)
    for i, param in enumerate(pysig.parameters.values()):
        name = param.name
        default = param.default
        if param.kind == param.VAR_POSITIONAL:
            # stararg may be omitted, in which case its "default" value
            # is simply the empty tuple
            if name in ba.arguments:
                argval = ba.arguments[name]
                # NOTE: avoid wrapping the tuple type for stararg in another
                # tuple.
                if (len(argval) == 1 and
                        isinstance(argval[0], (types.StarArgTuple,
                                               types.StarArgUniTuple))):
                    argval = tuple(argval[0])
            else:
                argval = ()
            ba.arguments[name] = stararg_handler(i, param, argval)
        elif name in ba.arguments:
            # Non-stararg, present
            ba.arguments[name] = normal_handler(i, param, ba.arguments[name])
        else:
            # Non-stararg, omitted
            assert default is not param.empty
            ba.arguments[name] = default_handler(i, param, default)
    # Collect args in the right order
    return tuple(ba.arguments[param.name]
                 for param in pysig.parameters.values())
259
+
260
+
261
class FunctionTemplate(ABC):
    """Abstract base class for callable typing templates.

    Subclasses supply an entry point (``apply`` or ``generic``) that the
    typing context uses to resolve a call signature for ``self.key``.
    """
    # Set to true to disable unsafe cast.
    # subclass overide-able
    unsafe_casting = True
    # Set to true to require exact match without casting.
    # subclass overide-able
    exact_match_required = False
    # Set to true to prefer literal arguments.
    # Useful for definitions that specialize on literal but also support
    # non-literals.
    # subclass overide-able
    prefer_literal = False
    # Free-form metadata (e.g. the 'target' string).  NOTE: this dict is a
    # class-level attribute shared by all subclasses that do not override it;
    # subclasses should replace it rather than mutate it in place.
    metadata = {}

    def __init__(self, context):
        # The typing context performing the resolution.
        self.context = context

    def _select(self, cases, args, kws):
        """Pick the matching signature among *cases* for the call
        ``(args, kws)`` via the typing context's overload resolution.
        """
        options = {
            'unsafe_casting': self.unsafe_casting,
            'exact_match_required': self.exact_match_required,
        }
        selected = self.context.resolve_overload(self.key, cases, args, kws,
                                                 **options)
        return selected

    def get_impl_key(self, sig):
        """
        Return the key for looking up the implementation for the given
        signature on the target context.
        """
        # Lookup the key on the class, to avoid binding it with `self`.
        key = type(self).key
        if isinstance(key, MethodType):
            # Unwrap a method object to its underlying function so the key is
            # stable.  BUG FIX: this previously used the Python-2-only
            # attributes ``im_self``/``im_func``, which raise AttributeError
            # on Python 3; ``__func__`` is the Python 3 spelling.
            key = key.__func__
        return key

    @classmethod
    def get_source_code_info(cls, impl):
        """
        Gets the source information about function impl.
        Returns:

        code - str: source code as a string
        firstlineno - int: the first line number of the function impl
        path - str: the path to file containing impl

        if any of the above are not available something generic is returned
        """
        try:
            code, firstlineno = inspect.getsourcelines(impl)
        except OSError:  # missing source, probably a string
            code = "None available (built from string?)"
            firstlineno = 0
        path = inspect.getsourcefile(impl)
        if path is None:
            path = "<unknown> (built from string?)"
        return code, firstlineno, path

    @abstractmethod
    def get_template_info(self):
        """
        Returns a dictionary with information specific to the template that
        will govern how error messages are displayed to users. The dictionary
        must be of the form:
        info = {
            'kind': "unknown", # str: The kind of template, e.g. "Overload"
            'name': "unknown", # str: The name of the source function
            'sig': "unknown",  # str: The signature(s) of the source function
            'filename': "unknown", # str: The filename of the source function
            'lines': ("start", "end"), # tuple(int, int): The start and
                                         end line of the source function.
            'docstring': "unknown" # str: The docstring of the source function
        }
        """
        pass

    def __str__(self):
        # Render as "<ClassName filename:firstline>" for debugging output.
        info = self.get_template_info()
        srcinfo = f"{info['filename']}:{info['lines'][0]}"
        return f"<{self.__class__.__name__} {srcinfo}>"

    __repr__ = __str__
347
+
348
+
349
class AbstractTemplate(FunctionTemplate):
    """
    Template whose subclasses define ``generic(self, args, kws)``, which
    computes a candidate signature from the input types.  The returned
    signature need not match the input types; it is compared against them
    afterwards.
    """

    def apply(self, args, kws):
        """Resolve a call with types *args*/*kws* by delegating to
        ``generic``, retrying once with ``Optional`` wrappers stripped."""
        typer = getattr(self, "generic")
        sig = typer(args, kws)
        # Enforce that generic() only ever yields a Signature or None.
        if sig is not None and not isinstance(sig, Signature):
            raise AssertionError(
                "generic() must return a Signature or None. "
                "{} returned {}".format(typer, type(sig)),
            )

        # No match yet: unwrap Optional argument types and try again.
        if not sig and any(isinstance(a, types.Optional) for a in args):
            assert not kws  # Not supported yet
            stripped = [a.type if isinstance(a, types.Optional) else a
                        for a in args]
            sig = typer(stripped, kws)

        return sig

    def get_template_info(self):
        """Return source metadata describing the ``generic`` definition."""
        impl = getattr(self, "generic")
        basepath = os.path.dirname(os.path.dirname(numba.__file__))
        code, firstlineno, path = self.get_source_code_info(impl)
        return {
            'kind': "overload",
            'name': getattr(impl, '__qualname__', impl.__name__),
            'sig': str(utils.pysignature(impl)),
            'filename': utils.safe_relpath(path, start=basepath),
            'lines': (firstlineno, firstlineno + len(code) - 1),
            'docstring': impl.__doc__,
        }
396
+
397
+
398
class CallableTemplate(FunctionTemplate):
    """
    Base class for a template defining a ``generic(self)`` method
    returning a callable to be called with the actual ``*args`` and
    ``**kwargs`` representing the call signature.  The callable has
    to return a return type, a full signature, or None.  The signature
    does not have to match the input types. It is compared against the
    input types afterwards.
    """
    # Optional receiver type prepended to the resolved signature
    # (set by subclasses that type bound calls).
    recvr = None

    def apply(self, args, kws):
        """Resolve a call with types *args*/*kws*.

        Obtains the typer callable from ``generic()``, invokes it with the
        argument types, normalizes the result into a full ``Signature`` and
        runs overload selection on it.  Returns None when no signature can
        be resolved.
        """
        generic = getattr(self, "generic")
        typer = generic()
        # Pre-check that the call arguments can bind to the typer's own
        # signature, so a mismatch surfaces as a TypingError instead of an
        # opaque TypeError when the typer is invoked below.
        match_sig = inspect.signature(typer)
        try:
            match_sig.bind(*args, **kws)
        except TypeError as e:
            # bind failed, raise, if there's a
            # ValueError then there's likely unrecoverable
            # problems
            raise TypingError(str(e)) from e

        sig = typer(*args, **kws)

        # Unpack optional type if no matching signature
        if sig is None:
            if any(isinstance(x, types.Optional) for x in args):
                def unpack_opt(x):
                    if isinstance(x, types.Optional):
                        return x.type
                    else:
                        return x

                args = list(map(unpack_opt, args))
                sig = typer(*args, **kws)
            if sig is None:
                return

        # Get the pysig: a typer may carry an explicit `pysig` attribute,
        # otherwise its own Python signature is used.
        try:
            pysig = typer.pysig
        except AttributeError:
            pysig = utils.pysignature(typer)

        # Fold any keyword arguments
        bound = pysig.bind(*args, **kws)
        if bound.kwargs:
            raise TypingError("unsupported call signature")
        if not isinstance(sig, Signature):
            # If not a signature, `sig` is assumed to be the return type
            if not isinstance(sig, types.Type):
                raise TypeError("invalid return type for callable template: "
                                "got %r" % (sig,))
            sig = signature(sig, *bound.args)
        if self.recvr is not None:
            sig = sig.replace(recvr=self.recvr)
        # Hack any omitted parameters out of the typer's pysig,
        # as lowering expects an exact match between formal signature
        # and actual args.
        if len(bound.args) < len(pysig.parameters):
            parameters = list(pysig.parameters.values())[:len(bound.args)]
            pysig = pysig.replace(parameters=parameters)
        sig = sig.replace(pysig=pysig)
        cases = [sig]
        return self._select(cases, bound.args, bound.kwargs)

    def get_template_info(self):
        """Return source metadata describing the ``generic`` definition."""
        impl = getattr(self, "generic")
        basepath = os.path.dirname(os.path.dirname(numba.__file__))
        code, firstlineno, path = self.get_source_code_info(impl)
        sig = str(utils.pysignature(impl))
        info = {
            'kind': "overload",
            # Prefer the overloaded key's name; fall back to the typer's.
            'name': getattr(self.key, '__name__',
                            getattr(impl, '__qualname__', impl.__name__),),
            'sig': sig,
            'filename': utils.safe_relpath(path, start=basepath),
            'lines': (firstlineno, firstlineno + len(code) - 1),
            'docstring': impl.__doc__
        }
        return info
480
+
481
+
482
class ConcreteTemplate(FunctionTemplate):
    """
    Template matching calls against a fixed list of signatures stored on the
    class attribute ``cases``.
    """

    def apply(self, args, kws):
        """Delegate straight to overload selection over the declared cases."""
        return self._select(getattr(self, 'cases'), args, kws)

    def get_template_info(self):
        """Return (mostly generic) metadata; detects stdlib operator keys."""
        import operator
        name = getattr(self.key, '__name__', "unknown")
        op_func = getattr(operator, name, None)
        # The key counts as an operator overload only when it *is* the
        # stdlib operator function of the same name.
        kind = ("operator overload"
                if op_func is not None and self.key is op_func
                else "Type restricted function")
        return {
            'kind': kind,
            'name': name,
            'sig': "unknown",
            'filename': "unknown",
            'lines': ("unknown", "unknown"),
            'docstring': "unknown",
        }
510
+
511
+
512
class _EmptyImplementationEntry(InternalError):
    """Placeholder stored where a compiled overload would live when the
    function is always inlined and therefore never compiled."""

    def __init__(self, reason):
        super().__init__("_EmptyImplementationEntry({!r})".format(reason))
517
+
518
+
519
class _OverloadFunctionTemplate(AbstractTemplate):
    """
    A base class of templates for overload functions.
    """

    def _validate_sigs(self, typing_func, impl_func):
        """Check that *impl_func* (the implementation returned by the
        overload) is signature-compatible with *typing_func* (the overload
        declaration).  Raises InternalError on mismatch.
        """
        # check that the impl func and the typing func have the same signature!
        typing_sig = utils.pysignature(typing_func)
        impl_sig = utils.pysignature(impl_func)
        # the typing signature is considered golden and must be adhered to by
        # the implementation...
        # Things that are valid:
        # 1. args match exactly
        # 2. kwargs match exactly in name and default value
        # 3. Use of *args in the same location by the same name in both typing
        #    and implementation signature
        # 4. Use of *args in the implementation signature to consume any number
        #    of arguments in the typing signature.
        # Things that are invalid:
        # 5. Use of *args in the typing signature that is not replicated
        #    in the implementing signature
        # 6. Use of **kwargs

        def get_args_kwargs(sig):
            # Split a signature into (positional params, keyword params,
            # the *args param if present).
            kws = []
            args = []
            pos_arg = None
            for x in sig.parameters.values():
                if x.default == utils.pyParameter.empty:
                    args.append(x)
                    if x.kind == utils.pyParameter.VAR_POSITIONAL:
                        pos_arg = x
                    elif x.kind == utils.pyParameter.VAR_KEYWORD:
                        msg = ("The use of VAR_KEYWORD (e.g. **kwargs) is "
                               "unsupported. (offending argument name is '%s')")
                        raise InternalError(msg % x)
                else:
                    kws.append(x)
            return args, kws, pos_arg

        ty_args, ty_kws, ty_pos = get_args_kwargs(typing_sig)
        im_args, im_kws, im_pos = get_args_kwargs(impl_sig)

        sig_fmt = ("Typing signature: %s\n"
                   "Implementation signature: %s")
        sig_str = sig_fmt % (typing_sig, impl_sig)

        err_prefix = "Typing and implementation arguments differ in "

        a = ty_args
        b = im_args
        if ty_pos:
            if not im_pos:
                # case 5. described above
                msg = ("VAR_POSITIONAL (e.g. *args) argument kind (offending "
                       "argument name is '%s') found in the typing function "
                       "signature, but is not in the implementing function "
                       "signature.\n%s") % (ty_pos, sig_str)
                raise InternalError(msg)
        else:
            if im_pos:
                # no *args in typing but there's a *args in the implementation
                # this is case 4. described above
                b = im_args[:im_args.index(im_pos)]
                try:
                    a = ty_args[:ty_args.index(b[-1]) + 1]
                except ValueError:
                    # there's no b[-1] arg name in the ty_args, something is
                    # very wrong, we can't work out a diff (*args consumes
                    # unknown quantity of args) so just report first error
                    specialized = "argument names.\n%s\nFirst difference: '%s'"
                    msg = err_prefix + specialized % (sig_str, b[-1])
                    raise InternalError(msg)

        def gen_diff(typing, implementing):
            # Symmetric difference of parameter sets for error reporting.
            diff = set(typing) ^ set(implementing)
            return "Difference: %s" % diff

        if a != b:
            specialized = "argument names.\n%s\n%s" % (sig_str, gen_diff(a, b))
            raise InternalError(err_prefix + specialized)

        # ensure kwargs are the same
        ty = [x.name for x in ty_kws]
        im = [x.name for x in im_kws]
        if ty != im:
            specialized = "keyword argument names.\n%s\n%s"
            msg = err_prefix + specialized % (sig_str, gen_diff(ty_kws, im_kws))
            raise InternalError(msg)
        same = [x.default for x in ty_kws] == [x.default for x in im_kws]
        if not same:
            specialized = "keyword argument default values.\n%s\n%s"
            msg = err_prefix + specialized % (sig_str, gen_diff(ty_kws, im_kws))
            raise InternalError(msg)

    def generic(self, args, kws):
        """
        Type the overloaded function by compiling the appropriate
        implementation for the given args.
        """
        from numba.core.typed_passes import PreLowerStripPhis

        disp, new_args = self._get_impl(args, kws)
        if disp is None:
            return
        # Compile and type it for the given types
        disp_type = types.Dispatcher(disp)
        # Store the compiled overload for use in the lowering phase if there's
        # no inlining required (else functions are being compiled which will
        # never be used as they are inlined)
        if not self._inline.is_never_inline:
            # need to run the compiler front end up to type inference to
            # compute a signature
            from numba.core import typed_passes, compiler
            from numba.core.inline_closurecall import InlineWorker
            fcomp = disp._compiler
            flags = compiler.Flags()

            # Updating these causes problems?!
            #fcomp.targetdescr.options.parse_as_flags(flags,
            #                                         fcomp.targetoptions)
            #flags = fcomp._customize_flags(flags)

            # spoof a compiler pipline like the one that will be in use
            tyctx = fcomp.targetdescr.typing_context
            tgctx = fcomp.targetdescr.target_context
            compiler_inst = fcomp.pipeline_class(tyctx, tgctx, None, None, None,
                                                 flags, None, )
            inline_worker = InlineWorker(tyctx, tgctx, fcomp.locals,
                                         compiler_inst, flags, None,)

            # If the inlinee contains something to trigger literal arg dispatch
            # then the pipeline call will unconditionally fail due to a raised
            # ForceLiteralArg exception. Therefore `resolve` is run first, as
            # type resolution must occur at some point, this will hit any
            # `literally` calls and because it's going via the dispatcher will
            # handle them correctly i.e. ForceLiteralArg propagates. This having
            # the desired effect of ensuring the pipeline call is only made in
            # situations that will succeed. For context see #5887.
            resolve = disp_type.dispatcher.get_call_template
            template, pysig, folded_args, kws = resolve(new_args, kws)
            ir = inline_worker.run_untyped_passes(
                disp_type.dispatcher.py_func, enable_ssa=True
            )

            (
                typemap,
                return_type,
                calltypes,
                _
            ) = typed_passes.type_inference_stage(
                self.context, tgctx, ir, folded_args, None)
            ir = PreLowerStripPhis()._strip_phi_nodes(ir)
            ir._definitions = numba.core.ir_utils.build_definitions(ir.blocks)

            sig = Signature(return_type, folded_args, None)
            # this stores a load of info for the cost model function if
            # supplied, it by default is None
            self._inline_overloads[sig.args] = {'folded_args': folded_args}
            # this stores the compiled overloads, if there's no compiled
            # overload available i.e. function is always inlined, the key still
            # needs to exist for type resolution

            # NOTE: If lowering is failing on a `_EmptyImplementationEntry`,
            #       the inliner has failed to inline this entry correctly.
            impl_init = _EmptyImplementationEntry('always inlined')
            self._compiled_overloads[sig.args] = impl_init
            if not self._inline.is_always_inline:
                # this branch is here because a user has supplied a function to
                # determine whether to inline or not. As a result both compiled
                # function and inliner info needed, delaying the computation of
                # this leads to an internal state mess at present. TODO: Fix!
                sig = disp_type.get_call_type(self.context, new_args, kws)
                self._compiled_overloads[sig.args] = disp_type.get_overload(sig)
                # store the inliner information, it's used later in the cost
                # model function call
                iinfo = _inline_info(ir, typemap, calltypes, sig)
                self._inline_overloads[sig.args] = {'folded_args': folded_args,
                                                    'iinfo': iinfo}
        else:
            sig = disp_type.get_call_type(self.context, new_args, kws)
            if sig is None:  # can't resolve for this target
                return None
            self._compiled_overloads[sig.args] = disp_type.get_overload(sig)
        return sig

    def _get_impl(self, args, kws):
        """Get implementation given the argument types.

        Returning a Dispatcher object. The Dispatcher object is cached
        internally in `self._impl_cache`.
        """
        flags = targetconfig.ConfigStack.top_or_none()
        cache_key = self.context, tuple(args), tuple(kws.items()), flags
        try:
            impl, args = self._impl_cache[cache_key]
            return impl, args
        except KeyError:
            # pass and try outside the scope so as to not have KeyError with a
            # nested addition error in the case the _build_impl fails
            pass
        impl, args = self._build_impl(cache_key, args, kws)
        return impl, args

    def _get_jit_decorator(self):
        """Gets a jit decorator suitable for the current target"""

        from numba.core.target_extension import (target_registry,
                                                 get_local_target,
                                                 jit_registry)

        jitter_str = self.metadata.get('target', 'generic')
        jitter = jit_registry.get(jitter_str, None)

        if jitter is None:
            # No JIT known for target string, see if something is
            # registered for the string and report if not.
            target_class = target_registry.get(jitter_str, None)
            if target_class is None:
                # BUG FIX: this message was previously a 2-tuple of strings
                # (stray trailing comma), so the `.format` below raised
                # AttributeError instead of the intended ValueError.
                msg = ("Unknown target '{}', has it been "
                       "registered?")
                raise ValueError(msg.format(jitter_str))

            target_hw = get_local_target(self.context)

            # check that the requested target is in the hierarchy for the
            # current frame's target.
            if not issubclass(target_hw, target_class):
                # BUG FIX: this message was previously assigned but never
                # raised, silently ignoring the target mismatch and falling
                # through to an arbitrary jit decorator.
                msg = "No overloads exist for the requested target: {}."
                raise TypingError(msg.format(jitter_str))

            jitter = jit_registry[target_hw]

        if jitter is None:
            raise ValueError("Cannot find a suitable jit decorator")

        return jitter

    def _build_impl(self, cache_key, args, kws):
        """Build and cache the implementation.

        Given the positional (`args`) and keyword arguments (`kws`), obtains
        the `overload` implementation and wrap it in a Dispatcher object.
        The expected argument types are returned for use by type-inference.
        The expected argument types are only different from the given argument
        types if there is an imprecise type in the given argument types.

        Parameters
        ----------
        cache_key : hashable
            The key used for caching the implementation.
        args : Tuple[Type]
            Types of positional argument.
        kws : Dict[Type]
            Types of keyword argument.

        Returns
        -------
        disp, args :
            On success, returns `(Dispatcher, Tuple[Type])`.
            On failure, returns `(None, None)`.

        """
        jitter = self._get_jit_decorator()

        # Get the overload implementation for the given types; pre-check that
        # the call arguments can bind to the overload declaration so a
        # mismatch surfaces as a TypingError.
        ov_sig = inspect.signature(self._overload_func)
        try:
            ov_sig.bind(*args, **kws)
        except TypeError as e:
            # bind failed, raise, if there's a
            # ValueError then there's likely unrecoverable
            # problems
            raise TypingError(str(e)) from e
        else:
            ovf_result = self._overload_func(*args, **kws)

        if ovf_result is None:
            # No implementation => fail typing
            self._impl_cache[cache_key] = None, None
            return None, None
        elif isinstance(ovf_result, tuple):
            # The implementation returned a signature that the type-inferencer
            # should be using.
            sig, pyfunc = ovf_result
            args = sig.args
            kws = {}
            cache_key = None  # don't cache
        else:
            # Regular case
            pyfunc = ovf_result

        # Check type of pyfunc
        if not isinstance(pyfunc, FunctionType):
            msg = ("Implementation function returned by `@overload` "
                   "has an unexpected type. Got {}")
            raise AssertionError(msg.format(pyfunc))

        # check that the typing and impl sigs match up
        if self._strict:
            self._validate_sigs(self._overload_func, pyfunc)
        # Make dispatcher
        jitdecor = jitter(**self._jit_options)
        disp = jitdecor(pyfunc)
        # Make sure that the implementation can be fully compiled
        disp_type = types.Dispatcher(disp)
        disp_type.get_call_type(self.context, args, kws)
        if cache_key is not None:
            self._impl_cache[cache_key] = disp, args
        return disp, args

    def get_impl_key(self, sig):
        """
        Return the key for looking up the implementation for the given
        signature on the target context.
        """
        return self._compiled_overloads[sig.args]

    @classmethod
    def get_source_info(cls):
        """Return a dictionary with information about the source code of the
        implementation.

        Returns
        -------
        info : dict
            - "kind" : str
                The implementation kind.
            - "name" : str
                The name of the function that provided the definition.
            - "sig" : str
                The formatted signature of the function.
            - "filename" : str
                The name of the source file.
            - "lines": tuple (int, int)
                First and list line number.
            - "docstring": str
                The docstring of the definition.
        """
        basepath = os.path.dirname(os.path.dirname(numba.__file__))
        impl = cls._overload_func
        code, firstlineno, path = cls.get_source_code_info(impl)
        sig = str(utils.pysignature(impl))
        info = {
            'kind': "overload",
            'name': getattr(impl, '__qualname__', impl.__name__),
            'sig': sig,
            'filename': utils.safe_relpath(path, start=basepath),
            'lines': (firstlineno, firstlineno + len(code) - 1),
            'docstring': impl.__doc__
        }
        return info

    def get_template_info(self):
        """Return source metadata describing the overload declaration."""
        basepath = os.path.dirname(os.path.dirname(numba.__file__))
        impl = self._overload_func
        code, firstlineno, path = self.get_source_code_info(impl)
        sig = str(utils.pysignature(impl))
        info = {
            'kind': "overload",
            'name': getattr(impl, '__qualname__', impl.__name__),
            'sig': sig,
            'filename': utils.safe_relpath(path, start=basepath),
            'lines': (firstlineno, firstlineno + len(code) - 1),
            'docstring': impl.__doc__
        }
        return info
885
+
886
+
887
def make_overload_template(func, overload_func, jit_options, strict,
                           inline, prefer_literal=False, **kwargs):
    """
    Make a template class for function *func* overloaded by *overload_func*.
    Compiler options are passed as a dictionary to *jit_options*.
    """
    func_name = getattr(func, '__name__', str(func))
    class_name = "OverloadTemplate_%s" % (func_name,)
    base = _OverloadFunctionTemplate
    # Per-class state: caches are fresh dicts so templates do not share them.
    members = {
        'key': func,
        '_overload_func': staticmethod(overload_func),
        '_impl_cache': {},
        '_compiled_overloads': {},
        '_jit_options': jit_options,
        '_strict': strict,
        '_inline': staticmethod(InlineOptions(inline)),
        '_inline_overloads': {},
        'prefer_literal': prefer_literal,
        'metadata': kwargs,
    }
    return type(base)(class_name, (base,), members)
902
+
903
+
904
class _TemplateTargetHelperMixin(object):
    """Mixin for helper methods that assist with target/registry resolution"""

    def _get_target_registry(self, reason):
        """Returns the registry for the current target.

        Parameters
        ----------
        reason: str
            Reason for the resolution. Expects a noun.

        Returns
        -------
        reg : a registry suitable for the current target.
        """
        from numba.core.target_extension import (_get_local_target_checked,
                                                 dispatcher_registry)

        hwstr = self.metadata.get('target', 'generic')
        target_hw = _get_local_target_checked(self.context, hwstr, reason)
        # Registry/context for the resolved hardware target.
        tgtctx = dispatcher_registry[target_hw].targetdescr.target_context

        # Workaround: targets such as CUDA "borrow" implementations from
        # specific CPU registries.  If an impl defined via e.g. @intrinsic
        # must also be visible to a borrowing target, it has to be installed
        # in a registry the target actually consults.  Hence the CPU
        # builtin_registry is preferred whenever the target context knows it;
        # otherwise the target's registries are assumed unshared and any of
        # them is an acceptable home for new lowering impls.
        #
        # NOTE: Once targets describe their implementations solely via the
        # extension APIs, the builtin_registry branch should become
        # unnecessary and only the fallback kept.

        # In case the target has swapped (e.g. cuda borrowing cpu), refresh
        # so the registry set is fully populated.
        tgtctx.refresh()
        if builtin_registry in tgtctx._registries:
            return builtin_registry
        # Pick a registry in which to install intrinsics.
        return next(iter(tgtctx._registries))
955
+
956
+
957
class _IntrinsicTemplate(_TemplateTargetHelperMixin, AbstractTemplate):
    """
    A base class of templates for intrinsic definition
    """

    def generic(self, args, kws):
        """
        Type the intrinsic by the arguments.
        """
        lower_builtin = self._get_target_registry('intrinsic').lower
        cache_key = self.context, args, tuple(kws.items())
        if cache_key in self._impl_cache:
            return self._impl_cache[cache_key]
        outcome = self._definition_func(self.context, *args, **kws)
        if outcome is None:
            return
        sig, impl_fn = outcome
        # Present the user function's signature minus the leading typing
        # context argument.
        pysig = utils.pysignature(self._definition_func)
        trimmed = list(pysig.parameters.values())[1:]
        sig = sig.replace(pysig=pysig.replace(parameters=trimmed))
        self._impl_cache[cache_key] = sig
        self._overload_cache[sig.args] = impl_fn
        # register the lowering
        lower_builtin(impl_fn, *sig.args)(impl_fn)
        return sig

    def get_impl_key(self, sig):
        """
        Return the key for looking up the implementation for the given
        signature on the target context.
        """
        return self._overload_cache[sig.args]

    def get_template_info(self):
        """Return source metadata describing the intrinsic definition."""
        basepath = os.path.dirname(os.path.dirname(numba.__file__))
        defn = self._definition_func
        code, firstlineno, path = self.get_source_code_info(defn)
        return {
            'kind': "intrinsic",
            'name': getattr(defn, '__qualname__', defn.__name__),
            'sig': str(utils.pysignature(defn)),
            'filename': utils.safe_relpath(path, start=basepath),
            'lines': (firstlineno, firstlineno + len(code) - 1),
            'docstring': defn.__doc__,
        }
1007
+
1008
+
1009
def make_intrinsic_template(handle, defn, name, *, prefer_literal=False,
                            kwargs=None):
    """
    Make a template class for a intrinsic handle *handle* defined by the
    function *defn*. The *name* is used for naming the new template class.
    """
    # Freeze the metadata so template classes cannot mutate shared state.
    metadata = MappingProxyType({} if kwargs is None else kwargs)
    base = _IntrinsicTemplate
    cls_name = "_IntrinsicTemplate_%s" % (name)
    members = dict(key=handle, _definition_func=staticmethod(defn),
                   _impl_cache={}, _overload_cache={},
                   prefer_literal=prefer_literal, metadata=metadata)
    return type(base)(cls_name, (base,), members)
1022
+
1023
+
1024
class AttributeTemplate(object):
    """Base class for attribute typing templates.

    Subclasses type attribute ``attr`` on a value by defining a
    ``resolve_<attr>(self, value)`` method, or a catch-all
    ``generic_resolve(self, value, attr)`` hook.
    """

    # Subclasses may override with a fallback ``(value, attr) -> type`` hook.
    generic_resolve = NotImplemented

    def __init__(self, context):
        self.context = context

    def resolve(self, value, attr):
        return self._resolve(value, attr)

    def _resolve(self, value, attr):
        resolver = getattr(self, "resolve_%s" % attr, None)
        if resolver is not None:
            # A dedicated per-attribute resolver takes precedence.
            return resolver(value)
        if self.generic_resolve is not NotImplemented:
            return self.generic_resolve(value, attr)
        # No resolver: modules may still expose constants via the context.
        if isinstance(value, types.Module):
            return self.context.resolve_module_constants(value, attr)
        return None
1046
+
1047
+
1048
class _OverloadAttributeTemplate(_TemplateTargetHelperMixin, AttributeTemplate):
    """
    A base class of templates for @overload_attribute functions.
    """
    # Distinguishes attribute overloads from method overloads (see
    # _OverloadMethodTemplate which sets this True).
    is_method = False

    def __init__(self, context):
        super(_OverloadAttributeTemplate, self).__init__(context)
        self.context = context
        # Registers the getattr lowering for this template's key/attr pair.
        self._init_once()

    def _init_once(self):
        cls = type(self)
        attr = cls._attr

        lower_getattr = self._get_target_registry('attribute').lower_getattr

        @lower_getattr(cls.key, attr)
        def getattr_impl(context, builder, typ, value):
            # Lower the attribute read as a call of the overload's
            # implementation with the receiver as sole argument.
            typingctx = context.typing_context
            fnty = cls._get_function_type(typingctx, typ)
            sig = cls._get_signature(typingctx, fnty, (typ,), {})
            call = context.get_function(fnty, sig)
            return call(builder, (value,))

    def _resolve(self, typ, attr):
        # Only answer for the single attribute this template was built for.
        if self._attr != attr:
            return None
        fnty = self._get_function_type(self.context, typ)
        sig = self._get_signature(self.context, fnty, (typ,), {})
        # There should only be one template
        # NOTE(review): `_inline_overloads` is supplied via the class dict by
        # the template-maker function, not defined in this class body.
        for template in fnty.templates:
            self._inline_overloads.update(template._inline_overloads)
        return sig.return_type

    @classmethod
    def _get_signature(cls, typingctx, fnty, args, kws):
        # Resolve the call signature, then pin the Python-level signature to
        # that of the overload declaration function.
        sig = fnty.get_call_type(typingctx, args, kws)
        sig = sig.replace(pysig=utils.pysignature(cls._overload_func))
        return sig

    @classmethod
    def _get_function_type(cls, typingctx, typ):
        # Numba type of the overload declaration function itself.
        return typingctx.resolve_value_type(cls._overload_func)
1093
+
1094
class _OverloadMethodTemplate(_OverloadAttributeTemplate):
    """
    A base class of templates for @overload_method functions.
    """
    is_method = True

    def _init_once(self):
        """
        Overriding parent definition
        """
        attr = self._attr

        registry = self._get_target_registry('method')

        # Register a lowering for calls of the bound method: the key is the
        # (type, attribute-name) pair, with the receiver as first argument.
        @registry.lower((self.key, attr), self.key, types.VarArg(types.Any))
        def method_impl(context, builder, sig, args):
            typ = sig.args[0]
            typing_context = context.typing_context
            fnty = self._get_function_type(typing_context, typ)
            sig = self._get_signature(typing_context, fnty, sig.args, {})
            call = context.get_function(fnty, sig)
            # Link dependent library
            context.add_linking_libs(getattr(call, 'libs', ()))
            return call(builder, args)

    def _resolve(self, typ, attr):
        # Only answer for the single method this template was built for.
        if self._attr != attr:
            return None

        # Sanity-check that the receiver type matches the registered key.
        if isinstance(typ, types.TypeRef):
            assert typ == self.key
        elif isinstance(typ, types.Callable):
            assert typ == self.key
        else:
            assert isinstance(typ, self.key)

        # Build a per-receiver method template; note that inside the nested
        # class, `self` closes over this _OverloadMethodTemplate instance
        # while the template instance itself is the unused `_` parameter.
        class MethodTemplate(AbstractTemplate):
            key = (self.key, attr)
            _inline = self._inline
            _overload_func = staticmethod(self._overload_func)
            _inline_overloads = self._inline_overloads
            prefer_literal = self.prefer_literal

            def generic(_, args, kws):
                # Prepend the receiver and resolve against the overload.
                args = (typ,) + tuple(args)
                fnty = self._get_function_type(self.context, typ)
                sig = self._get_signature(self.context, fnty, args, kws)
                sig = sig.replace(pysig=utils.pysignature(self._overload_func))
                for template in fnty.templates:
                    self._inline_overloads.update(template._inline_overloads)
                if sig is not None:
                    return sig.as_method()

            def get_template_info(self):
                # Source metadata for error reporting; `self` here is the
                # MethodTemplate instance, `_overload_func` its class attr.
                basepath = os.path.dirname(os.path.dirname(numba.__file__))
                impl = self._overload_func
                code, firstlineno, path = self.get_source_code_info(impl)
                sig = str(utils.pysignature(impl))
                info = {
                    'kind': "overload_method",
                    'name': getattr(impl, '__qualname__', impl.__name__),
                    'sig': sig,
                    'filename': utils.safe_relpath(path, start=basepath),
                    'lines': (firstlineno, firstlineno + len(code) - 1),
                    'docstring': impl.__doc__
                }

                return info

        return types.BoundFunction(MethodTemplate, typ)
1164
+
1165
+
1166
def make_overload_attribute_template(typ, attr, overload_func, inline='never',
                                     prefer_literal=False,
                                     base=_OverloadAttributeTemplate,
                                     **kwargs):
    """
    Make a template class for attribute *attr* of *typ* overloaded by
    *overload_func*.
    """
    assert isinstance(typ, types.Type) or issubclass(typ, types.Type)
    name = "OverloadAttributeTemplate_%s_%s" % (typ, attr)
    # Note the implementation cache is subclass-specific.
    members = {
        'key': typ,
        '_attr': attr,
        '_impl_cache': {},
        '_inline': staticmethod(InlineOptions(inline)),
        '_inline_overloads': {},
        '_overload_func': staticmethod(overload_func),
        'prefer_literal': prefer_literal,
        'metadata': kwargs,
    }
    # Build the class through *base*'s own metaclass.
    return type(base)(name, (base,), members)
1186
+
1187
+
1188
def make_overload_method_template(typ, attr, overload_func, inline,
                                  prefer_literal=False, **kwargs):
    """
    Make a template class for method *attr* of *typ* overloaded by
    *overload_func*.
    """
    # A method template is an attribute template built on the method base.
    return make_overload_attribute_template(
        typ,
        attr,
        overload_func,
        inline=inline,
        base=_OverloadMethodTemplate,
        prefer_literal=prefer_literal,
        **kwargs,
    )
1199
+
1200
+
1201
def bound_function(template_key):
    """
    Wrap an AttributeTemplate resolve_* method to allow it to
    resolve an instance method's signature rather than a instance attribute.
    The wrapped method must return the resolved method's signature
    according to the given self type, args, and keywords.

    It is used thusly:

        class ComplexAttributes(AttributeTemplate):
            @bound_function("complex.conjugate")
            def resolve_conjugate(self, ty, args, kwds):
                return ty

    *template_key* (e.g. "complex.conjugate" above) will be used by the
    target to look up the method's implementation, as a regular function.
    """
    def decorator(method_resolver):
        @functools.wraps(method_resolver)
        def attribute_resolver(self, ty):
            class MethodTemplate(AbstractTemplate):
                key = template_key

                def generic(_, args, kws):
                    resolved = method_resolver(self, ty, args, kws)
                    # Attach the receiver type when the resolver left
                    # it unset.
                    if resolved is not None and resolved.recvr is None:
                        resolved = resolved.replace(recvr=ty)
                    return resolved

            return types.BoundFunction(MethodTemplate, ty)

        return attribute_resolver

    return decorator
1233
+
1234
+
1235
+ # -----------------------------
1236
+
1237
class Registry(object):
    """
    A registry of typing declarations. The registry stores such declarations
    for functions, attributes and globals.
    """

    def __init__(self):
        self.functions = []
        self.attributes = []
        self.globals = []

    def register(self, item):
        """Register a FunctionTemplate subclass.  Returns *item* so this
        can be used as a class decorator."""
        assert issubclass(item, FunctionTemplate)
        self.functions.append(item)
        return item

    def register_attr(self, item):
        """Register an AttributeTemplate subclass.  Returns *item* so this
        can be used as a class decorator."""
        assert issubclass(item, AttributeTemplate)
        self.attributes.append(item)
        return item

    def register_global(self, val=None, typ=None, **kwargs):
        """
        Register the typing of a global value.
        Functional usage with a Numba type::
            register_global(value, typ)

        Decorator usage with a template class::
            @register_global(value, typing_key=None)
            class Template:
                ...
        """
        if typ is not None:
            # register_global(val, typ)
            assert val is not None
            assert not kwargs
            self.globals.append((val, typ))
        else:
            def decorate(cls, typing_key):
                class Template(cls):
                    key = typing_key
                if callable(val):
                    typ = types.Function(Template)
                else:
                    # Fix: the original raised the message with a literal
                    # '%r' in it, never interpolating the offending value.
                    raise TypeError("cannot infer type for global value %r"
                                    % (val,))
                self.globals.append((val, typ))
                return cls

            # register_global(val, typing_key=None)(<template class>)
            assert val is not None
            typing_key = kwargs.pop('typing_key', val)
            assert not kwargs
            if typing_key is val:
                # Check the value is globally reachable, as it is going
                # to be used as the key.
                mod = sys.modules[val.__module__]
                if getattr(mod, val.__name__) is not val:
                    raise ValueError("%r is not globally reachable as '%s.%s'"
                                     % (mod, val.__module__, val.__name__))

            def decorator(cls):
                return decorate(cls, typing_key)
            return decorator
1301
+
1302
class BaseRegistryLoader(object):
    """
    An incremental loader for a registry.  Each new call to
    new_registrations() will iterate over the not yet seen registrations.

    The reason for this object is multiple:
    - there can be several contexts
    - each context wants to install all registrations
    - registrations can be added after the first installation, so contexts
      must be able to get the "new" installations

    Therefore each context maintains its own loaders for each existing
    registry, without duplicating the registries themselves.
    """

    def __init__(self, registry):
        # One incremental stream per registry category; each stream
        # remembers how far it has already been consumed.
        self._registrations = {
            name: utils.stream_list(getattr(registry, name))
            for name in self.registry_items
        }

    def new_registrations(self, name):
        # Yield only the registrations added since the previous call.
        yield from next(self._registrations[name])
1326
+
1327
class RegistryLoader(BaseRegistryLoader):
    """
    An incremental loader for a typing registry.
    """
    # The three declaration categories stored on a typing Registry.
    registry_items = ('functions', 'attributes', 'globals')
1332
+
1333
+
1334
# Default registry for built-in typing declarations, and the decorator
# aliases conventionally used to populate it.
builtin_registry = Registry()
infer = builtin_registry.register
infer_getattr = builtin_registry.register_attr
infer_global = builtin_registry.register_global
lib/python3.10/site-packages/numba/core/unsafe/__init__.py ADDED
File without changes
lib/python3.10/site-packages/numba/core/unsafe/bytes.py ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This file provides internal compiler utilities that support certain special
3
+ operations with bytes and workarounds for limitations enforced in userland.
4
+ """
5
+
6
+ from numba.core.extending import intrinsic
7
+ from llvmlite import ir
8
+ from numba.core import types, cgutils
9
+
10
+
11
@intrinsic
def grab_byte(typingctx, data, offset):
    """Return the uint8 located *offset* bytes into *data* (a void pointer)."""
    def codegen(context, builder, signature, args):
        base, off = args
        # Reinterpret the void pointer as i8* and index into it.
        byte_ptr = builder.bitcast(base, ir.IntType(8).as_pointer())
        return builder.load(builder.gep(byte_ptr, [off]))

    sig = types.uint8(types.voidptr, types.intp)
    return sig, codegen
22
+
23
+
24
@intrinsic
def grab_uint64_t(typingctx, data, offset):
    """Return the uint64 at index *offset* of *data*.

    NOTE: the GEP is performed on an i64*, so *offset* is measured in
    8-byte elements, not in bytes.
    """
    def codegen(context, builder, signature, args):
        base, off = args
        word_ptr = builder.bitcast(base, ir.IntType(64).as_pointer())
        return builder.load(builder.gep(word_ptr, [off]))

    sig = types.uint64(types.voidptr, types.intp)
    return sig, codegen
34
+
35
+
36
@intrinsic
def memcpy_region(typingctx, dst, dst_offset, src, src_offset, nbytes, align):
    '''Copy nbytes from *(src + src_offset) to *(dst + dst_offset)'''
    def codegen(context, builder, signature, args):
        (dst_val, dst_offset_val, src_val, src_offset_val,
         nbytes_val, align_val) = args
        # Compute the byte-offset source/destination pointers, then
        # delegate the actual copy to the cgutils helper.
        from_ptr = builder.gep(src_val, [src_offset_val])
        to_ptr = builder.gep(dst_val, [dst_offset_val])
        cgutils.raw_memcpy(builder, to_ptr, from_ptr, nbytes_val, align_val)
        return context.get_dummy_value()

    sig = types.void(types.voidptr, types.intp, types.voidptr, types.intp,
                     types.intp, types.intp)
    return sig, codegen