Matt300209 committed on
Commit
bc9f36d
·
verified ·
1 Parent(s): 0ae3676

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50) hide show
  1. venv/lib/python3.10/site-packages/numba/__pycache__/__init__.cpython-310.pyc +0 -0
  2. venv/lib/python3.10/site-packages/numba/__pycache__/__main__.cpython-310.pyc +0 -0
  3. venv/lib/python3.10/site-packages/numba/__pycache__/_version.cpython-310.pyc +0 -0
  4. venv/lib/python3.10/site-packages/numba/__pycache__/extending.cpython-310.pyc +0 -0
  5. venv/lib/python3.10/site-packages/numba/__pycache__/runtests.cpython-310.pyc +0 -0
  6. venv/lib/python3.10/site-packages/numba/cuda/__pycache__/printimpl.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/numba/cuda/__pycache__/random.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/numba/cuda/__pycache__/simulator_init.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/numba/cuda/__pycache__/stubs.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/numba/cuda/__pycache__/target.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/numba/cuda/__pycache__/testing.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/numba/cuda/__pycache__/types.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/numba/cuda/__pycache__/ufuncs.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/numba/cuda/__pycache__/vector_types.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/numba/cuda/__pycache__/vectorizers.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/numba/misc/__pycache__/POST.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/numba/misc/__pycache__/__init__.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/numba/misc/__pycache__/appdirs.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/numba/misc/__pycache__/cffiimpl.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/numba/misc/__pycache__/coverage_support.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/numba/misc/__pycache__/dump_style.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/numba/misc/__pycache__/findlib.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/numba/misc/__pycache__/firstlinefinder.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/numba/misc/__pycache__/gdb_hook.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/numba/misc/__pycache__/gdb_print_extension.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/numba/misc/__pycache__/init_utils.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/numba/misc/__pycache__/inspection.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/numba/misc/__pycache__/literal.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/numba/misc/__pycache__/llvm_pass_timings.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/numba/misc/__pycache__/mergesort.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/numba/misc/__pycache__/numba_entry.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/numba/misc/__pycache__/numba_gdbinfo.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/numba/misc/__pycache__/numba_sysinfo.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/numba/misc/__pycache__/quicksort.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/numba/misc/__pycache__/special.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/numba/misc/__pycache__/timsort.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/numba/misc/gdb_print_extension.py +204 -0
  38. venv/lib/python3.10/site-packages/numba/misc/help/__init__.py +0 -0
  39. venv/lib/python3.10/site-packages/numba/misc/help/inspector.py +433 -0
  40. venv/lib/python3.10/site-packages/numba/misc/init_utils.py +44 -0
  41. venv/lib/python3.10/site-packages/numba/misc/inspection.py +103 -0
  42. venv/lib/python3.10/site-packages/numba/misc/literal.py +24 -0
  43. venv/lib/python3.10/site-packages/numba/misc/llvm_pass_timings.py +409 -0
  44. venv/lib/python3.10/site-packages/numba/misc/mergesort.py +126 -0
  45. venv/lib/python3.10/site-packages/numba/misc/numba_entry.py +72 -0
  46. venv/lib/python3.10/site-packages/numba/misc/numba_gdbinfo.py +161 -0
  47. venv/lib/python3.10/site-packages/numba/misc/numba_sysinfo.py +698 -0
  48. venv/lib/python3.10/site-packages/numba/misc/quicksort.py +261 -0
  49. venv/lib/python3.10/site-packages/numba/misc/special.py +104 -0
  50. venv/lib/python3.10/site-packages/numba/misc/timsort.py +943 -0
venv/lib/python3.10/site-packages/numba/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (5.46 kB). View file
 
venv/lib/python3.10/site-packages/numba/__pycache__/__main__.cpython-310.pyc ADDED
Binary file (425 Bytes). View file
 
venv/lib/python3.10/site-packages/numba/__pycache__/_version.cpython-310.pyc ADDED
Binary file (566 Bytes). View file
 
venv/lib/python3.10/site-packages/numba/__pycache__/extending.cpython-310.pyc ADDED
Binary file (312 Bytes). View file
 
venv/lib/python3.10/site-packages/numba/__pycache__/runtests.cpython-310.pyc ADDED
Binary file (450 Bytes). View file
 
venv/lib/python3.10/site-packages/numba/cuda/__pycache__/printimpl.cpython-310.pyc ADDED
Binary file (2.89 kB). View file
 
venv/lib/python3.10/site-packages/numba/cuda/__pycache__/random.cpython-310.pyc ADDED
Binary file (7.94 kB). View file
 
venv/lib/python3.10/site-packages/numba/cuda/__pycache__/simulator_init.cpython-310.pyc ADDED
Binary file (627 Bytes). View file
 
venv/lib/python3.10/site-packages/numba/cuda/__pycache__/stubs.cpython-310.pyc ADDED
Binary file (27.2 kB). View file
 
venv/lib/python3.10/site-packages/numba/cuda/__pycache__/target.cpython-310.pyc ADDED
Binary file (15.4 kB). View file
 
venv/lib/python3.10/site-packages/numba/cuda/__pycache__/testing.cpython-310.pyc ADDED
Binary file (7.28 kB). View file
 
venv/lib/python3.10/site-packages/numba/cuda/__pycache__/types.cpython-310.pyc ADDED
Binary file (1.23 kB). View file
 
venv/lib/python3.10/site-packages/numba/cuda/__pycache__/ufuncs.cpython-310.pyc ADDED
Binary file (11.2 kB). View file
 
venv/lib/python3.10/site-packages/numba/cuda/__pycache__/vector_types.cpython-310.pyc ADDED
Binary file (6.94 kB). View file
 
venv/lib/python3.10/site-packages/numba/cuda/__pycache__/vectorizers.cpython-310.pyc ADDED
Binary file (9.44 kB). View file
 
venv/lib/python3.10/site-packages/numba/misc/__pycache__/POST.cpython-310.pyc ADDED
Binary file (1.32 kB). View file
 
venv/lib/python3.10/site-packages/numba/misc/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (246 Bytes). View file
 
venv/lib/python3.10/site-packages/numba/misc/__pycache__/appdirs.cpython-310.pyc ADDED
Binary file (18.4 kB). View file
 
venv/lib/python3.10/site-packages/numba/misc/__pycache__/cffiimpl.cpython-310.pyc ADDED
Binary file (816 Bytes). View file
 
venv/lib/python3.10/site-packages/numba/misc/__pycache__/coverage_support.cpython-310.pyc ADDED
Binary file (3.89 kB). View file
 
venv/lib/python3.10/site-packages/numba/misc/__pycache__/dump_style.cpython-310.pyc ADDED
Binary file (2.31 kB). View file
 
venv/lib/python3.10/site-packages/numba/misc/__pycache__/findlib.cpython-310.pyc ADDED
Binary file (1.99 kB). View file
 
venv/lib/python3.10/site-packages/numba/misc/__pycache__/firstlinefinder.cpython-310.pyc ADDED
Binary file (2.66 kB). View file
 
venv/lib/python3.10/site-packages/numba/misc/__pycache__/gdb_hook.cpython-310.pyc ADDED
Binary file (7.08 kB). View file
 
venv/lib/python3.10/site-packages/numba/misc/__pycache__/gdb_print_extension.cpython-310.pyc ADDED
Binary file (6.12 kB). View file
 
venv/lib/python3.10/site-packages/numba/misc/__pycache__/init_utils.cpython-310.pyc ADDED
Binary file (1.43 kB). View file
 
venv/lib/python3.10/site-packages/numba/misc/__pycache__/inspection.cpython-310.pyc ADDED
Binary file (3.3 kB). View file
 
venv/lib/python3.10/site-packages/numba/misc/__pycache__/literal.cpython-310.pyc ADDED
Binary file (1.23 kB). View file
 
venv/lib/python3.10/site-packages/numba/misc/__pycache__/llvm_pass_timings.cpython-310.pyc ADDED
Binary file (12.8 kB). View file
 
venv/lib/python3.10/site-packages/numba/misc/__pycache__/mergesort.cpython-310.pyc ADDED
Binary file (3.29 kB). View file
 
venv/lib/python3.10/site-packages/numba/misc/__pycache__/numba_entry.cpython-310.pyc ADDED
Binary file (2.45 kB). View file
 
venv/lib/python3.10/site-packages/numba/misc/__pycache__/numba_gdbinfo.cpython-310.pyc ADDED
Binary file (5.05 kB). View file
 
venv/lib/python3.10/site-packages/numba/misc/__pycache__/numba_sysinfo.cpython-310.pyc ADDED
Binary file (20.5 kB). View file
 
venv/lib/python3.10/site-packages/numba/misc/__pycache__/quicksort.cpython-310.pyc ADDED
Binary file (6.1 kB). View file
 
venv/lib/python3.10/site-packages/numba/misc/__pycache__/special.cpython-310.pyc ADDED
Binary file (4.06 kB). View file
 
venv/lib/python3.10/site-packages/numba/misc/__pycache__/timsort.cpython-310.pyc ADDED
Binary file (17.8 kB). View file
 
venv/lib/python3.10/site-packages/numba/misc/gdb_print_extension.py ADDED
@@ -0,0 +1,204 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """gdb printing extension for Numba types.
2
+ """
3
+ import re
4
+
5
+ try:
6
+ import gdb.printing
7
+ import gdb
8
+ except ImportError:
9
+ raise ImportError("GDB python support is not available.")
10
+
11
+
12
+ class NumbaArrayPrinter:
13
+
14
+ def __init__(self, val):
15
+ self.val = val
16
+
17
+ def to_string(self):
18
+ try:
19
+ import numpy as np
20
+ HAVE_NUMPY = True
21
+ except ImportError:
22
+ HAVE_NUMPY = False
23
+
24
+ try:
25
+ NULL = 0x0
26
+
27
+ # Raw data references, these need unpacking/interpreting.
28
+
29
+ # Member "data" is...
30
+ # DW_TAG_member of DIDerivedType, tag of DW_TAG_pointer_type
31
+ # encoding e.g. DW_ATE_float
32
+ data = self.val["data"]
33
+
34
+ # Member "itemsize" is...
35
+ # DW_TAG_member of DIBasicType encoding DW_ATE_signed
36
+ itemsize = self.val["itemsize"]
37
+
38
+ # Members "shape" and "strides" are...
39
+ # DW_TAG_member of DIDerivedType, the type is a DICompositeType
40
+ # (it's a Numba UniTuple) with tag: DW_TAG_array_type, i.e. it's an
41
+ # array repr, it has a basetype of e.g. DW_ATE_unsigned and also
42
+ # "elements" which are referenced with a DISubrange(count: <const>)
43
+ # to say how many elements are in the array.
44
+ rshp = self.val["shape"]
45
+ rstrides = self.val["strides"]
46
+
47
+ # bool on whether the data is aligned.
48
+ is_aligned = False
49
+
50
+ # type information decode, simple type:
51
+ ty_str = str(self.val.type)
52
+ if HAVE_NUMPY and ('aligned' in ty_str or 'Record' in ty_str):
53
+ ty_str = ty_str.replace('unaligned ','').strip()
54
+ matcher = re.compile(r"array\((Record.*), (.*), (.*)\)\ \(.*")
55
+ # NOTE: need to deal with "Alignment" else dtype size is wrong
56
+ arr_info = [x.strip() for x in matcher.match(ty_str).groups()]
57
+ dtype_str, ndim_str, order_str = arr_info
58
+ rstr = r'Record\((.*\[.*\]);([0-9]+);(True|False)'
59
+ rstr_match = re.match(rstr, dtype_str)
60
+ # balign is unused, it's the alignment
61
+ fields, balign, is_aligned_str = rstr_match.groups()
62
+ is_aligned = is_aligned_str == 'True'
63
+ field_dts = fields.split(',')
64
+ struct_entries = []
65
+ for f in field_dts:
66
+ splitted = f.split('[')
67
+ name = splitted[0]
68
+ dt_part = splitted[1:]
69
+ if len(dt_part) > 1:
70
+ raise TypeError('Unsupported sub-type: %s' % f)
71
+ else:
72
+ dt_part = dt_part[0]
73
+ if "nestedarray" in dt_part:
74
+ raise TypeError('Unsupported sub-type: %s' % f)
75
+ dt_as_str = dt_part.split(';')[0].split('=')[1]
76
+ dtype = np.dtype(dt_as_str)
77
+ struct_entries.append((name, dtype))
78
+ # The dtype is actually a record of some sort
79
+ dtype_str = struct_entries
80
+ else: # simple type
81
+ matcher = re.compile(r"array\((.*),(.*),(.*)\)\ \(.*")
82
+ arr_info = [x.strip() for x in matcher.match(ty_str).groups()]
83
+ dtype_str, ndim_str, order_str = arr_info
84
+ # fix up unichr dtype
85
+ if 'unichr x ' in dtype_str:
86
+ dtype_str = dtype_str[1:-1].replace('unichr x ', '<U')
87
+
88
+ def dwarr2inttuple(dwarr):
89
+ # Converts a gdb handle to a dwarf array to a tuple of ints
90
+ fields = dwarr.type.fields()
91
+ lo, hi = fields[0].type.range()
92
+ return tuple([int(dwarr[x]) for x in range(lo, hi + 1)])
93
+
94
+ # shape/strides extraction
95
+ shape = dwarr2inttuple(rshp)
96
+ strides = dwarr2inttuple(rstrides)
97
+
98
+ # if data is not NULL
99
+ if data != NULL:
100
+ if HAVE_NUMPY:
101
+ # The data extent in bytes is:
102
+ # sum(shape * strides)
103
+ # get the data, then wire to as_strided
104
+ shp_arr = np.array([max(0, x - 1) for x in shape])
105
+ strd_arr = np.array(strides)
106
+ extent = np.sum(shp_arr * strd_arr)
107
+ extent += int(itemsize)
108
+ dtype_clazz = np.dtype(dtype_str, align=is_aligned)
109
+ dtype = dtype_clazz
110
+ this_proc = gdb.selected_inferior()
111
+ mem = this_proc.read_memory(int(data), extent)
112
+ arr_data = np.frombuffer(mem, dtype=dtype)
113
+ new_arr = np.lib.stride_tricks.as_strided(arr_data,
114
+ shape=shape,
115
+ strides=strides,)
116
+ return '\n' + str(new_arr)
117
+ # Catch all for no NumPy
118
+ return "array([...], dtype=%s, shape=%s)" % (dtype_str, shape)
119
+ else:
120
+ # Not yet initialized or NULLed out data
121
+ buf = list(["NULL/Uninitialized"])
122
+ return "array([" + ', '.join(buf) + "]" + ")"
123
+ except Exception as e:
124
+ return 'array[Exception: Failed to parse. %s]' % e
125
+
126
+
127
+ class NumbaComplexPrinter:
128
+
129
+ def __init__(self, val):
130
+ self.val = val
131
+
132
+ def to_string(self):
133
+ return "%s+%sj" % (self.val['real'], self.val['imag'])
134
+
135
+
136
+ class NumbaTuplePrinter:
137
+
138
+ def __init__(self, val):
139
+ self.val = val
140
+
141
+ def to_string(self):
142
+ buf = []
143
+ fields = self.val.type.fields()
144
+ for f in fields:
145
+ buf.append(str(self.val[f.name]))
146
+ return "(%s)" % ', '.join(buf)
147
+
148
+
149
+ class NumbaUniTuplePrinter:
150
+
151
+ def __init__(self, val):
152
+ self.val = val
153
+
154
+ def to_string(self):
155
+ # unituples are arrays
156
+ fields = self.val.type.fields()
157
+ lo, hi = fields[0].type.range()
158
+ buf = []
159
+ for i in range(lo, hi + 1):
160
+ buf.append(str(self.val[i]))
161
+ return "(%s)" % ', '.join(buf)
162
+
163
+
164
+ class NumbaUnicodeTypePrinter:
165
+
166
+ def __init__(self, val):
167
+ self.val = val
168
+
169
+ def to_string(self):
170
+ NULL = 0x0
171
+ data = self.val["data"]
172
+ nitems = self.val["length"]
173
+ kind = self.val["kind"]
174
+ if data != NULL:
175
+ # This needs sorting out, encoding is wrong
176
+ this_proc = gdb.selected_inferior()
177
+ mem = this_proc.read_memory(int(data), nitems * kind)
178
+ if isinstance(mem, memoryview):
179
+ buf = bytes(mem).decode()
180
+ else:
181
+ buf = mem.decode('utf-8')
182
+ else:
183
+ buf = str(data)
184
+ return "'%s'" % buf
185
+
186
+
187
+ def _create_printers():
188
+ printer = gdb.printing.RegexpCollectionPrettyPrinter("Numba")
189
+ printer.add_printer('Numba unaligned array printer', '^unaligned array\\(',
190
+ NumbaArrayPrinter)
191
+ printer.add_printer('Numba array printer', '^array\\(', NumbaArrayPrinter)
192
+ printer.add_printer('Numba complex printer', '^complex[0-9]+\\ ',
193
+ NumbaComplexPrinter)
194
+ printer.add_printer('Numba Tuple printer', '^Tuple\\(',
195
+ NumbaTuplePrinter)
196
+ printer.add_printer('Numba UniTuple printer', '^UniTuple\\(',
197
+ NumbaUniTuplePrinter)
198
+ printer.add_printer('Numba unicode_type printer', '^unicode_type\\s+\\(',
199
+ NumbaUnicodeTypePrinter)
200
+ return printer
201
+
202
+
203
+ # register the Numba pretty printers for the current object
204
+ gdb.printing.register_pretty_printer(gdb.current_objfile(), _create_printers())
venv/lib/python3.10/site-packages/numba/misc/help/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/numba/misc/help/inspector.py ADDED
@@ -0,0 +1,433 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This file contains `__main__` so that it can be run as a commandline tool.
3
+
4
+ This file contains functions to inspect Numba's support for a given Python
5
+ module or a Python package.
6
+ """
7
+
8
+ import argparse
9
+ import pkgutil
10
+ import warnings
11
+ import types as pytypes
12
+
13
+ from numba.core import errors
14
+ from numba._version import get_versions
15
+ from numba.core.registry import cpu_target
16
+ from numba.tests.support import captured_stdout
17
+
18
+
19
+ def _get_commit():
20
+ full = get_versions()['full-revisionid']
21
+ if not full:
22
+ warnings.warn(
23
+ "Cannot find git commit hash. Source links could be inaccurate.",
24
+ category=errors.NumbaWarning,
25
+ )
26
+ return 'main'
27
+ return full
28
+
29
+
30
+ commit = _get_commit()
31
+ github_url = 'https://github.com/numba/numba/blob/{commit}/{path}#L{firstline}-L{lastline}' # noqa: E501
32
+
33
+
34
+ def inspect_function(function, target=None):
35
+ """Return information about the support of a function.
36
+
37
+ Returns
38
+ -------
39
+ info : dict
40
+ Defined keys:
41
+ - "numba_type": str or None
42
+ The numba type object of the function if supported.
43
+ - "explained": str
44
+ A textual description of the support.
45
+ - "source_infos": dict
46
+ A dictionary containing the source location of each definition.
47
+ """
48
+ target = target or cpu_target
49
+ tyct = target.typing_context
50
+ # Make sure we have loaded all extensions
51
+ tyct.refresh()
52
+ target.target_context.refresh()
53
+
54
+ info = {}
55
+ # Try getting the function type
56
+ source_infos = {}
57
+ try:
58
+ nbty = tyct.resolve_value_type(function)
59
+ except ValueError:
60
+ nbty = None
61
+ explained = 'not supported'
62
+ else:
63
+ # Make a longer explanation of the type
64
+ explained = tyct.explain_function_type(nbty)
65
+ for temp in nbty.templates:
66
+ try:
67
+ source_infos[temp] = temp.get_source_info()
68
+ except AttributeError:
69
+ source_infos[temp] = None
70
+
71
+ info['numba_type'] = nbty
72
+ info['explained'] = explained
73
+ info['source_infos'] = source_infos
74
+ return info
75
+
76
+
77
+ def inspect_module(module, target=None, alias=None):
78
+ """Inspect a module object and yielding results from `inspect_function()`
79
+ for each function object in the module.
80
+ """
81
+ alias = {} if alias is None else alias
82
+ # Walk the module
83
+ for name in dir(module):
84
+ if name.startswith('_'):
85
+ # Skip
86
+ continue
87
+ obj = getattr(module, name)
88
+ supported_types = (pytypes.FunctionType, pytypes.BuiltinFunctionType)
89
+
90
+ if not isinstance(obj, supported_types):
91
+ # Skip if it's not a function
92
+ continue
93
+
94
+ info = dict(module=module, name=name, obj=obj)
95
+ if obj in alias:
96
+ info['alias'] = alias[obj]
97
+ else:
98
+ alias[obj] = "{module}.{name}".format(module=module.__name__,
99
+ name=name)
100
+ info.update(inspect_function(obj, target=target))
101
+ yield info
102
+
103
+
104
+ class _Stat(object):
105
+ """For gathering simple statistic of (un)supported functions"""
106
+ def __init__(self):
107
+ self.supported = 0
108
+ self.unsupported = 0
109
+
110
+ @property
111
+ def total(self):
112
+ total = self.supported + self.unsupported
113
+ return total
114
+
115
+ @property
116
+ def ratio(self):
117
+ ratio = self.supported / self.total * 100
118
+ return ratio
119
+
120
+ def describe(self):
121
+ if self.total == 0:
122
+ return "empty"
123
+ return "supported = {supported} / {total} = {ratio:.2f}%".format(
124
+ supported=self.supported,
125
+ total=self.total,
126
+ ratio=self.ratio,
127
+ )
128
+
129
+ def __repr__(self):
130
+ return "{clsname}({describe})".format(
131
+ clsname=self.__class__.__name__,
132
+ describe=self.describe(),
133
+ )
134
+
135
+
136
+ def filter_private_module(module_components):
137
+ return not any(x.startswith('_') for x in module_components)
138
+
139
+
140
+ def filter_tests_module(module_components):
141
+ return not any(x == 'tests' for x in module_components)
142
+
143
+
144
+ _default_module_filters = (
145
+ filter_private_module,
146
+ filter_tests_module,
147
+ )
148
+
149
+
150
+ def list_modules_in_package(package, module_filters=_default_module_filters):
151
+ """Yield all modules in a given package.
152
+
153
+ Recursively walks the package tree.
154
+ """
155
+ onerror_ignore = lambda _: None
156
+
157
+ prefix = package.__name__ + "."
158
+ package_walker = pkgutil.walk_packages(
159
+ package.__path__,
160
+ prefix,
161
+ onerror=onerror_ignore,
162
+ )
163
+
164
+ def check_filter(modname):
165
+ module_components = modname.split('.')
166
+ return any(not filter_fn(module_components)
167
+ for filter_fn in module_filters)
168
+
169
+ modname = package.__name__
170
+ if not check_filter(modname):
171
+ yield package
172
+
173
+ for pkginfo in package_walker:
174
+ modname = pkginfo[1]
175
+ if check_filter(modname):
176
+ continue
177
+ # In case importing of the module print to stdout
178
+ with captured_stdout():
179
+ try:
180
+ # Import the module
181
+ mod = __import__(modname)
182
+ except Exception:
183
+ continue
184
+
185
+ # Extract the module
186
+ for part in modname.split('.')[1:]:
187
+ try:
188
+ mod = getattr(mod, part)
189
+ except AttributeError:
190
+ # Suppress error in getting the attribute
191
+ mod = None
192
+ break
193
+
194
+ # Ignore if mod is not a module
195
+ if not isinstance(mod, pytypes.ModuleType):
196
+ # Skip non-module
197
+ continue
198
+
199
+ yield mod
200
+
201
+
202
+ class Formatter(object):
203
+ """Base class for formatters.
204
+ """
205
+ def __init__(self, fileobj):
206
+ self._fileobj = fileobj
207
+
208
+ def print(self, *args, **kwargs):
209
+ kwargs.setdefault('file', self._fileobj)
210
+ print(*args, **kwargs)
211
+
212
+
213
+ class HTMLFormatter(Formatter):
214
+ """Formatter that outputs HTML
215
+ """
216
+
217
+ def escape(self, text):
218
+ import html
219
+ return html.escape(text)
220
+
221
+ def title(self, text):
222
+ self.print('<h1>', text, '</h2>')
223
+
224
+ def begin_module_section(self, modname):
225
+ self.print('<h2>', modname, '</h2>')
226
+ self.print('<ul>')
227
+
228
+ def end_module_section(self):
229
+ self.print('</ul>')
230
+
231
+ def write_supported_item(self, modname, itemname, typename, explained,
232
+ sources, alias):
233
+ self.print('<li>')
234
+ self.print('{}.<b>{}</b>'.format(
235
+ modname,
236
+ itemname,
237
+ ))
238
+ self.print(': <b>{}</b>'.format(typename))
239
+ self.print('<div><pre>', explained, '</pre></div>')
240
+
241
+ self.print("<ul>")
242
+ for tcls, source in sources.items():
243
+ if source:
244
+ self.print("<li>")
245
+ impl = source['name']
246
+ sig = source['sig']
247
+ filename = source['filename']
248
+ lines = source['lines']
249
+ self.print(
250
+ "<p>defined by <b>{}</b>{} at {}:{}-{}</p>".format(
251
+ self.escape(impl), self.escape(sig),
252
+ self.escape(filename), lines[0], lines[1],
253
+ ),
254
+ )
255
+ self.print('<p>{}</p>'.format(
256
+ self.escape(source['docstring'] or '')
257
+ ))
258
+ else:
259
+ self.print("<li>{}".format(self.escape(str(tcls))))
260
+ self.print("</li>")
261
+ self.print("</ul>")
262
+ self.print('</li>')
263
+
264
+ def write_unsupported_item(self, modname, itemname):
265
+ self.print('<li>')
266
+ self.print('{}.<b>{}</b>: UNSUPPORTED'.format(
267
+ modname,
268
+ itemname,
269
+ ))
270
+ self.print('</li>')
271
+
272
+ def write_statistic(self, stats):
273
+ self.print('<p>{}</p>'.format(stats.describe()))
274
+
275
+
276
+ class ReSTFormatter(Formatter):
277
+ """Formatter that output ReSTructured text format for Sphinx docs.
278
+ """
279
+ def escape(self, text):
280
+ return text
281
+
282
+ def title(self, text):
283
+ self.print(text)
284
+ self.print('=' * len(text))
285
+ self.print()
286
+
287
+ def begin_module_section(self, modname):
288
+ self.print(modname)
289
+ self.print('-' * len(modname))
290
+ self.print()
291
+
292
+ def end_module_section(self):
293
+ self.print()
294
+
295
+ def write_supported_item(self, modname, itemname, typename, explained,
296
+ sources, alias):
297
+ self.print('.. function:: {}.{}'.format(modname, itemname))
298
+ self.print(' :noindex:')
299
+ self.print()
300
+
301
+ if alias:
302
+ self.print(" Alias to: ``{}``".format(alias))
303
+ self.print()
304
+
305
+ for tcls, source in sources.items():
306
+ if source:
307
+ impl = source['name']
308
+ sig = source['sig']
309
+ filename = source['filename']
310
+ lines = source['lines']
311
+ source_link = github_url.format(
312
+ commit=commit,
313
+ path=filename,
314
+ firstline=lines[0],
315
+ lastline=lines[1],
316
+ )
317
+ self.print(
318
+ " - defined by ``{}{}`` at `{}:{}-{} <{}>`_".format(
319
+ impl, sig, filename, lines[0], lines[1], source_link,
320
+ ),
321
+ )
322
+
323
+ else:
324
+ self.print(" - defined by ``{}``".format(str(tcls)))
325
+ self.print()
326
+
327
+ def write_unsupported_item(self, modname, itemname):
328
+ pass
329
+
330
+ def write_statistic(self, stat):
331
+ if stat.supported == 0:
332
+ self.print("This module is not supported.")
333
+ else:
334
+ msg = "Not showing {} unsupported functions."
335
+ self.print(msg.format(stat.unsupported))
336
+ self.print()
337
+ self.print(stat.describe())
338
+ self.print()
339
+
340
+
341
+ def _format_module_infos(formatter, package_name, mod_sequence, target=None):
342
+ """Format modules.
343
+ """
344
+ formatter.title('Listings for {}'.format(package_name))
345
+ alias_map = {} # remember object seen to track alias
346
+ for mod in mod_sequence:
347
+ stat = _Stat()
348
+ modname = mod.__name__
349
+ formatter.begin_module_section(formatter.escape(modname))
350
+ for info in inspect_module(mod, target=target, alias=alias_map):
351
+ nbtype = info['numba_type']
352
+ if nbtype is not None:
353
+ stat.supported += 1
354
+ formatter.write_supported_item(
355
+ modname=formatter.escape(info['module'].__name__),
356
+ itemname=formatter.escape(info['name']),
357
+ typename=formatter.escape(str(nbtype)),
358
+ explained=formatter.escape(info['explained']),
359
+ sources=info['source_infos'],
360
+ alias=info.get('alias'),
361
+ )
362
+
363
+ else:
364
+ stat.unsupported += 1
365
+ formatter.write_unsupported_item(
366
+ modname=formatter.escape(info['module'].__name__),
367
+ itemname=formatter.escape(info['name']),
368
+ )
369
+
370
+ formatter.write_statistic(stat)
371
+ formatter.end_module_section()
372
+
373
+
374
+ def write_listings(package_name, filename, output_format):
375
+ """Write listing information into a file.
376
+
377
+ Parameters
378
+ ----------
379
+ package_name : str
380
+ Name of the package to inspect.
381
+ filename : str
382
+ Output filename. Always overwrite.
383
+ output_format : str
384
+ Support formats are "html" and "rst".
385
+ """
386
+ package = __import__(package_name)
387
+ if hasattr(package, '__path__'):
388
+ mods = list_modules_in_package(package)
389
+ else:
390
+ mods = [package]
391
+
392
+ if output_format == 'html':
393
+ with open(filename + '.html', 'w') as fout:
394
+ fmtr = HTMLFormatter(fileobj=fout)
395
+ _format_module_infos(fmtr, package_name, mods)
396
+ elif output_format == 'rst':
397
+ with open(filename + '.rst', 'w') as fout:
398
+ fmtr = ReSTFormatter(fileobj=fout)
399
+ _format_module_infos(fmtr, package_name, mods)
400
+ else:
401
+ raise ValueError(
402
+ "Output format '{}' is not supported".format(output_format))
403
+
404
+
405
+ program_description = """
406
+ Inspect Numba support for a given top-level package.
407
+ """.strip()
408
+
409
+
410
+ def main():
411
+ parser = argparse.ArgumentParser(description=program_description)
412
+ parser.add_argument(
413
+ 'package', metavar='package', type=str,
414
+ help='Package to inspect',
415
+ )
416
+ parser.add_argument(
417
+ '--format', dest='format', default='html',
418
+ help='Output format; i.e. "html", "rst"',
419
+ )
420
+ parser.add_argument(
421
+ '--file', dest='file', default='inspector_output',
422
+ help='Output filename. Defaults to "inspector_output.<format>"',
423
+ )
424
+
425
+ args = parser.parse_args()
426
+ package_name = args.package
427
+ output_format = args.format
428
+ filename = args.file
429
+ write_listings(package_name, filename, output_format)
430
+
431
+
432
+ if __name__ == '__main__':
433
+ main()
venv/lib/python3.10/site-packages/numba/misc/init_utils.py ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Collection of miscellaneous initialization utilities."""
2
+
3
+ from collections import namedtuple
4
+
5
+ version_info = namedtuple('version_info',
6
+ ('major minor patch short full '
7
+ 'string tuple git_revision'))
8
+
9
+
10
+ def generate_version_info(version):
11
+ """Process a version string into a structured version_info object.
12
+
13
+ Parameters
14
+ ----------
15
+ version: str
16
+ a string describing the current version
17
+
18
+ Returns
19
+ -------
20
+ version_info: tuple
21
+ structured version information
22
+
23
+ See also
24
+ --------
25
+ Look at the definition of 'version_info' in this module for details.
26
+
27
+ """
28
+ parts = version.split('.')
29
+
30
+ def try_int(x):
31
+ try:
32
+ return int(x)
33
+ except ValueError:
34
+ return None
35
+ major = try_int(parts[0]) if len(parts) >= 1 else None
36
+ minor = try_int(parts[1]) if len(parts) >= 2 else None
37
+ patch = try_int(parts[2]) if len(parts) >= 3 else None
38
+ short = (major, minor)
39
+ full = (major, minor, patch)
40
+ string = version
41
+ tup = tuple(parts)
42
+ git_revision = tup[3] if len(tup) >= 4 else None
43
+ return version_info(major, minor, patch, short, full, string, tup,
44
+ git_revision)
venv/lib/python3.10/site-packages/numba/misc/inspection.py ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Miscellaneous inspection tools
2
+ """
3
+ from tempfile import NamedTemporaryFile, TemporaryDirectory
4
+ import os
5
+ import warnings
6
+
7
+ from numba.core.errors import NumbaWarning
8
+
9
+
10
+ def disassemble_elf_to_cfg(elf, mangled_symbol):
11
+ """
12
+ Gets the CFG of the disassembly of an ELF object, elf, at mangled name,
13
+ mangled_symbol, and renders it appropriately depending on the execution
14
+ environment (terminal/notebook).
15
+ """
16
+ try:
17
+ import r2pipe
18
+ except ImportError:
19
+ raise RuntimeError("r2pipe package needed for disasm CFG")
20
+
21
+ def get_rendering(cmd=None):
22
+ from numba.pycc.platform import Toolchain # import local, circular ref
23
+ if cmd is None:
24
+ raise ValueError("No command given")
25
+
26
+ with TemporaryDirectory() as tmpdir:
27
+ # Write ELF as a temporary file in the temporary dir, do not delete!
28
+ with NamedTemporaryFile(delete=False, dir=tmpdir) as f:
29
+ f.write(elf)
30
+ f.flush() # force write, radare2 needs a binary blob on disk
31
+
32
+ # Now try and link the ELF, this helps radare2 _a lot_
33
+ linked = False
34
+ try:
35
+ raw_dso_name = f'{os.path.basename(f.name)}.so'
36
+ linked_dso = os.path.join(tmpdir, raw_dso_name)
37
+ tc = Toolchain()
38
+ tc.link_shared(linked_dso, (f.name,))
39
+ obj_to_analyse = linked_dso
40
+ linked = True
41
+ except Exception as e:
42
+ # link failed, mention it to user, radare2 will still be able to
43
+ # analyse the object, but things like dwarf won't appear in the
44
+ # asm as comments.
45
+ msg = ('Linking the ELF object with the distutils toolchain '
46
+ f'failed with: {e}. Disassembly will still work but '
47
+ 'might be less accurate and will not use DWARF '
48
+ 'information.')
49
+ warnings.warn(NumbaWarning(msg))
50
+ obj_to_analyse = f.name
51
+
52
+ # catch if r2pipe can actually talk to radare2
53
+ try:
54
+ flags = ['-2', # close stderr to hide warnings
55
+ '-e io.cache=true', # fix relocations in disassembly
56
+ '-e scr.color=1', # 16 bit ANSI colour terminal
57
+ '-e asm.dwarf=true', # DWARF decode
58
+ '-e scr.utf8=true', # UTF8 output looks better
59
+ ]
60
+ r = r2pipe.open(obj_to_analyse, flags=flags)
61
+ r.cmd('aaaaaa') # analyse as much as possible
62
+ # If the elf is linked then it's necessary to seek as the
63
+ # DSO ctor/dtor is at the default position
64
+ if linked:
65
+ # r2 only matches up to 61 chars?! found this by experiment!
66
+ mangled_symbol_61char = mangled_symbol[:61]
67
+ # switch off demangle, the seek is on a mangled symbol
68
+ r.cmd('e bin.demangle=false')
69
+ # seek to the mangled symbol address
70
+ r.cmd(f's `is~ {mangled_symbol_61char}[1]`')
71
+ # switch demangling back on for output purposes
72
+ r.cmd('e bin.demangle=true')
73
+ data = r.cmd('%s' % cmd) # print graph
74
+ r.quit()
75
+ except Exception as e:
76
+ if "radare2 in PATH" in str(e):
77
+ msg = ("This feature requires 'radare2' to be "
78
+ "installed and available on the system see: "
79
+ "https://github.com/radareorg/radare2. "
80
+ "Cannot find 'radare2' in $PATH.")
81
+ raise RuntimeError(msg)
82
+ else:
83
+ raise e
84
+ return data
85
+
86
+ class DisasmCFG(object):
87
+
88
+ def _repr_svg_(self):
89
+ try:
90
+ import graphviz
91
+ except ImportError:
92
+ raise RuntimeError("graphviz package needed for disasm CFG")
93
+ jupyter_rendering = get_rendering(cmd='agfd')
94
+ # this just makes it read slightly better in jupyter notebooks
95
+ jupyter_rendering.replace('fontname="Courier",',
96
+ 'fontname="Courier",fontsize=6,')
97
+ src = graphviz.Source(jupyter_rendering)
98
+ return src.pipe('svg').decode('UTF-8')
99
+
100
+ def __repr__(self):
101
+ return get_rendering(cmd='agf')
102
+
103
+ return DisasmCFG()
venv/lib/python3.10/site-packages/numba/misc/literal.py ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from numba.core.extending import overload
2
+ from numba.core import types
3
+ from numba.misc.special import literally, literal_unroll
4
+ from numba.core.errors import TypingError
5
+
6
+
7
+ @overload(literally)
8
+ def _ov_literally(obj):
9
+ if isinstance(obj, (types.Literal, types.InitialValue)):
10
+ return lambda obj: obj
11
+ else:
12
+ m = "Invalid use of non-Literal type in literally({})".format(obj)
13
+ raise TypingError(m)
14
+
15
+
16
+ @overload(literal_unroll)
17
+ def literal_unroll_impl(container):
18
+ if isinstance(container, types.Poison):
19
+ m = f"Invalid use of non-Literal type in literal_unroll({container})"
20
+ raise TypingError(m)
21
+
22
+ def impl(container):
23
+ return container
24
+ return impl
venv/lib/python3.10/site-packages/numba/misc/llvm_pass_timings.py ADDED
@@ -0,0 +1,409 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import re
2
+ import operator
3
+ import heapq
4
+ from collections import namedtuple
5
+ from collections.abc import Sequence
6
+ from contextlib import contextmanager
7
+ from functools import cached_property
8
+
9
+ from numba.core import config
10
+
11
+ import llvmlite.binding as llvm
12
+
13
+
14
+ class RecordLLVMPassTimings:
15
+ """A helper context manager to track LLVM pass timings.
16
+ """
17
+
18
+ __slots__ = ["_data"]
19
+
20
+ def __enter__(self):
21
+ """Enables the pass timing in LLVM.
22
+ """
23
+ llvm.set_time_passes(True)
24
+ return self
25
+
26
+ def __exit__(self, exc_val, exc_type, exc_tb):
27
+ """Reset timings and save report internally.
28
+ """
29
+ self._data = llvm.report_and_reset_timings()
30
+ llvm.set_time_passes(False)
31
+ return
32
+
33
+ def get(self):
34
+ """Retrieve timing data for processing.
35
+
36
+ Returns
37
+ -------
38
+ timings: ProcessedPassTimings
39
+ """
40
+ return ProcessedPassTimings(self._data)
41
+
42
+
43
+ PassTimingRecord = namedtuple(
44
+ "PassTimingRecord",
45
+ [
46
+ "user_time",
47
+ "user_percent",
48
+ "system_time",
49
+ "system_percent",
50
+ "user_system_time",
51
+ "user_system_percent",
52
+ "wall_time",
53
+ "wall_percent",
54
+ "pass_name",
55
+ "instruction",
56
+ ],
57
+ )
58
+
59
+
60
+ def _adjust_timings(records):
61
+ """Adjust timing records because of truncated information.
62
+
63
+ Details: The percent information can be used to improve the timing
64
+ information.
65
+
66
+ Returns
67
+ -------
68
+ res: List[PassTimingRecord]
69
+ """
70
+ total_rec = records[-1]
71
+ assert total_rec.pass_name == "Total" # guard for implementation error
72
+
73
+ def make_adjuster(attr):
74
+ time_attr = f"{attr}_time"
75
+ percent_attr = f"{attr}_percent"
76
+ time_getter = operator.attrgetter(time_attr)
77
+
78
+ def adjust(d):
79
+ """Compute percent x total_time = adjusted"""
80
+ total = time_getter(total_rec)
81
+ adjusted = total * d[percent_attr] * 0.01
82
+ d[time_attr] = adjusted
83
+ return d
84
+
85
+ return adjust
86
+
87
+ # Make adjustment functions for each field
88
+ adj_fns = [
89
+ make_adjuster(x) for x in ["user", "system", "user_system", "wall"]
90
+ ]
91
+
92
+ # Extract dictionaries from the namedtuples
93
+ dicts = map(lambda x: x._asdict(), records)
94
+
95
+ def chained(d):
96
+ # Chain the adjustment functions
97
+ for fn in adj_fns:
98
+ d = fn(d)
99
+ # Reconstruct the namedtuple
100
+ return PassTimingRecord(**d)
101
+
102
+ return list(map(chained, dicts))
103
+
104
+
105
+ class ProcessedPassTimings:
106
+ """A class for processing raw timing report from LLVM.
107
+
108
+ The processing is done lazily so we don't waste time processing unused
109
+ timing information.
110
+ """
111
+
112
+ def __init__(self, raw_data):
113
+ self._raw_data = raw_data
114
+
115
+ def __bool__(self):
116
+ return bool(self._raw_data)
117
+
118
+ def get_raw_data(self):
119
+ """Returns the raw string data.
120
+
121
+ Returns
122
+ -------
123
+ res: str
124
+ """
125
+ return self._raw_data
126
+
127
+ def get_total_time(self):
128
+ """Compute the total time spend in all passes.
129
+
130
+ Returns
131
+ -------
132
+ res: float
133
+ """
134
+ return self.list_records()[-1].wall_time
135
+
136
+ def list_records(self):
137
+ """Get the processed data for the timing report.
138
+
139
+ Returns
140
+ -------
141
+ res: List[PassTimingRecord]
142
+ """
143
+ return self._processed
144
+
145
+ def list_top(self, n):
146
+ """Returns the top(n) most time-consuming (by wall-time) passes.
147
+
148
+ Parameters
149
+ ----------
150
+ n: int
151
+ This limits the maximum number of items to show.
152
+ This function will show the ``n`` most time-consuming passes.
153
+
154
+ Returns
155
+ -------
156
+ res: List[PassTimingRecord]
157
+ Returns the top(n) most time-consuming passes in descending order.
158
+ """
159
+ records = self.list_records()
160
+ key = operator.attrgetter("wall_time")
161
+ return heapq.nlargest(n, records[:-1], key)
162
+
163
+ def summary(self, topn=5, indent=0):
164
+ """Return a string summarizing the timing information.
165
+
166
+ Parameters
167
+ ----------
168
+ topn: int; optional
169
+ This limits the maximum number of items to show.
170
+ This function will show the ``topn`` most time-consuming passes.
171
+ indent: int; optional
172
+ Set the indentation level. Defaults to 0 for no indentation.
173
+
174
+ Returns
175
+ -------
176
+ res: str
177
+ """
178
+ buf = []
179
+ prefix = " " * indent
180
+
181
+ def ap(arg):
182
+ buf.append(f"{prefix}{arg}")
183
+
184
+ ap(f"Total {self.get_total_time():.4f}s")
185
+ ap("Top timings:")
186
+ for p in self.list_top(topn):
187
+ ap(f" {p.wall_time:.4f}s ({p.wall_percent:5}%) {p.pass_name}")
188
+ return "\n".join(buf)
189
+
190
+ @cached_property
191
+ def _processed(self):
192
+ """A cached property for lazily processing the data and returning it.
193
+
194
+ See ``_process()`` for details.
195
+ """
196
+ return self._process()
197
+
198
+ def _process(self):
199
+ """Parses the raw string data from LLVM timing report and attempts
200
+ to improve the data by recomputing the times
201
+ (See `_adjust_timings()``).
202
+ """
203
+
204
+ def parse(raw_data):
205
+ """A generator that parses the raw_data line-by-line to extract
206
+ timing information for each pass.
207
+ """
208
+ lines = raw_data.splitlines()
209
+ colheader = r"[a-zA-Z+ ]+"
210
+ # Take at least one column header.
211
+ multicolheaders = fr"(?:\s*-+{colheader}-+)+"
212
+
213
+ line_iter = iter(lines)
214
+ # find column headers
215
+ header_map = {
216
+ "User Time": "user",
217
+ "System Time": "system",
218
+ "User+System": "user_system",
219
+ "Wall Time": "wall",
220
+ "Instr": "instruction",
221
+ "Name": "pass_name",
222
+ }
223
+ for ln in line_iter:
224
+ m = re.match(multicolheaders, ln)
225
+ if m:
226
+ # Get all the column headers
227
+ raw_headers = re.findall(r"[a-zA-Z][a-zA-Z+ ]+", ln)
228
+ headers = [header_map[k.strip()] for k in raw_headers]
229
+ break
230
+
231
+ assert headers[-1] == 'pass_name'
232
+ # compute the list of available attributes from the column headers
233
+ attrs = []
234
+ n = r"\s*((?:[0-9]+\.)?[0-9]+)"
235
+ pat = ""
236
+ for k in headers[:-1]:
237
+ if k == "instruction":
238
+ pat += n
239
+ else:
240
+ attrs.append(f"{k}_time")
241
+ attrs.append(f"{k}_percent")
242
+ pat += rf"\s+(?:{n}\s*\({n}%\)|-+)"
243
+
244
+ # put default value 0.0 to all missing attributes
245
+ missing = {}
246
+ for k in PassTimingRecord._fields:
247
+ if k not in attrs and k != 'pass_name':
248
+ missing[k] = 0.0
249
+ # parse timings
250
+ pat += r"\s*(.*)"
251
+ for ln in line_iter:
252
+ m = re.match(pat, ln)
253
+ if m is not None:
254
+ raw_data = list(m.groups())
255
+ data = {k: float(v) if v is not None else 0.0
256
+ for k, v in zip(attrs, raw_data)}
257
+ data.update(missing)
258
+ pass_name = raw_data[-1]
259
+ rec = PassTimingRecord(
260
+ pass_name=pass_name, **data,
261
+ )
262
+ yield rec
263
+ if rec.pass_name == "Total":
264
+ # "Total" means the report has ended
265
+ break
266
+ # Check that we have reach the end of the report
267
+ remaining = '\n'.join(line_iter)
268
+ if remaining:
269
+ raise ValueError(
270
+ f"unexpected text after parser finished:\n{remaining}"
271
+ )
272
+
273
+ # Parse raw data
274
+ records = list(parse(self._raw_data))
275
+ return _adjust_timings(records)
276
+
277
+
278
+ NamedTimings = namedtuple("NamedTimings", ["name", "timings"])
279
+
280
+
281
+ class PassTimingsCollection(Sequence):
282
+ """A collection of pass timings.
283
+
284
+ This class implements the ``Sequence`` protocol for accessing the
285
+ individual timing records.
286
+ """
287
+
288
+ def __init__(self, name):
289
+ self._name = name
290
+ self._records = []
291
+
292
+ @contextmanager
293
+ def record(self, name):
294
+ """Record new timings and append to this collection.
295
+
296
+ Note: this is mainly for internal use inside the compiler pipeline.
297
+
298
+ See also ``RecordLLVMPassTimings``
299
+
300
+ Parameters
301
+ ----------
302
+ name: str
303
+ Name for the records.
304
+ """
305
+ if config.LLVM_PASS_TIMINGS:
306
+ # Recording of pass timings is enabled
307
+ with RecordLLVMPassTimings() as timings:
308
+ yield
309
+ rec = timings.get()
310
+ # Only keep non-empty records
311
+ if rec:
312
+ self._append(name, rec)
313
+ else:
314
+ # Do nothing. Recording of pass timings is disabled.
315
+ yield
316
+
317
+ def _append(self, name, timings):
318
+ """Append timing records
319
+
320
+ Parameters
321
+ ----------
322
+ name: str
323
+ Name for the records.
324
+ timings: ProcessedPassTimings
325
+ the timing records.
326
+ """
327
+ self._records.append(NamedTimings(name, timings))
328
+
329
+ def get_total_time(self):
330
+ """Computes the sum of the total time across all contained timings.
331
+
332
+ Returns
333
+ -------
334
+ res: float or None
335
+ Returns the total number of seconds or None if no timings were
336
+ recorded
337
+ """
338
+ if self._records:
339
+ return sum(r.timings.get_total_time() for r in self._records)
340
+ else:
341
+ return None
342
+
343
+ def list_longest_first(self):
344
+ """Returns the timings in descending order of total time duration.
345
+
346
+ Returns
347
+ -------
348
+ res: List[ProcessedPassTimings]
349
+ """
350
+ return sorted(self._records,
351
+ key=lambda x: x.timings.get_total_time(),
352
+ reverse=True)
353
+
354
+ @property
355
+ def is_empty(self):
356
+ """
357
+ """
358
+ return not self._records
359
+
360
+ def summary(self, topn=5):
361
+ """Return a string representing the summary of the timings.
362
+
363
+ Parameters
364
+ ----------
365
+ topn: int; optional, default=5.
366
+ This limits the maximum number of items to show.
367
+ This function will show the ``topn`` most time-consuming passes.
368
+
369
+ Returns
370
+ -------
371
+ res: str
372
+
373
+ See also ``ProcessedPassTimings.summary()``
374
+ """
375
+ if self.is_empty:
376
+ return "No pass timings were recorded"
377
+ else:
378
+ buf = []
379
+ ap = buf.append
380
+ ap(f"Printing pass timings for {self._name}")
381
+ overall_time = self.get_total_time()
382
+ ap(f"Total time: {overall_time:.4f}")
383
+ for i, r in enumerate(self._records):
384
+ ap(f"== #{i} {r.name}")
385
+ percent = r.timings.get_total_time() / overall_time * 100
386
+ ap(f" Percent: {percent:.1f}%")
387
+ ap(r.timings.summary(topn=topn, indent=1))
388
+ return "\n".join(buf)
389
+
390
+ def __getitem__(self, i):
391
+ """Get the i-th timing record.
392
+
393
+ Returns
394
+ -------
395
+ res: (name, timings)
396
+ A named tuple with two fields:
397
+
398
+ - name: str
399
+ - timings: ProcessedPassTimings
400
+ """
401
+ return self._records[i]
402
+
403
+ def __len__(self):
404
+ """Length of this collection.
405
+ """
406
+ return len(self._records)
407
+
408
+ def __str__(self):
409
+ return self.summary()
venv/lib/python3.10/site-packages/numba/misc/mergesort.py ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ The same algorithm as translated from numpy.
3
+ See numpy/core/src/npysort/mergesort.c.src.
4
+ The high-level numba code is adding a little overhead comparing to
5
+ the pure-C implementation in numpy.
6
+ """
7
+ import numpy as np
8
+ from collections import namedtuple
9
+
10
+ # Array size smaller than this will be sorted by insertion sort
11
+ SMALL_MERGESORT = 20
12
+
13
+
14
+ MergesortImplementation = namedtuple('MergesortImplementation', [
15
+ 'run_mergesort',
16
+ ])
17
+
18
+
19
+ def make_mergesort_impl(wrap, lt=None, is_argsort=False):
20
+ kwargs_lite = dict(no_cpython_wrapper=True, _nrt=False)
21
+
22
+ # The less than
23
+ if lt is None:
24
+ @wrap(**kwargs_lite)
25
+ def lt(a, b):
26
+ return a < b
27
+ else:
28
+ lt = wrap(**kwargs_lite)(lt)
29
+
30
+ if is_argsort:
31
+ @wrap(**kwargs_lite)
32
+ def lessthan(a, b, vals):
33
+ return lt(vals[a], vals[b])
34
+ else:
35
+ @wrap(**kwargs_lite)
36
+ def lessthan(a, b, vals):
37
+ return lt(a, b)
38
+
39
+ @wrap(**kwargs_lite)
40
+ def argmergesort_inner(arr, vals, ws):
41
+ """The actual mergesort function
42
+
43
+ Parameters
44
+ ----------
45
+ arr : array [read+write]
46
+ The values being sorted inplace. For argsort, this is the
47
+ indices.
48
+ vals : array [readonly]
49
+ ``None`` for normal sort. In argsort, this is the actual array values.
50
+ ws : array [write]
51
+ The workspace. Must be of size ``arr.size // 2``
52
+ """
53
+ if arr.size > SMALL_MERGESORT:
54
+ # Merge sort
55
+ mid = arr.size // 2
56
+
57
+ argmergesort_inner(arr[:mid], vals, ws)
58
+ argmergesort_inner(arr[mid:], vals, ws)
59
+
60
+ # Copy left half into workspace so we don't overwrite it
61
+ for i in range(mid):
62
+ ws[i] = arr[i]
63
+
64
+ # Merge
65
+ left = ws[:mid]
66
+ right = arr[mid:]
67
+ out = arr
68
+
69
+ i = j = k = 0
70
+ while i < left.size and j < right.size:
71
+ if not lessthan(right[j], left[i], vals):
72
+ out[k] = left[i]
73
+ i += 1
74
+ else:
75
+ out[k] = right[j]
76
+ j += 1
77
+ k += 1
78
+
79
+ # Leftovers
80
+ while i < left.size:
81
+ out[k] = left[i]
82
+ i += 1
83
+ k += 1
84
+
85
+ while j < right.size:
86
+ out[k] = right[j]
87
+ j += 1
88
+ k += 1
89
+ else:
90
+ # Insertion sort
91
+ i = 1
92
+ while i < arr.size:
93
+ j = i
94
+ while j > 0 and lessthan(arr[j], arr[j - 1], vals):
95
+ arr[j - 1], arr[j] = arr[j], arr[j - 1]
96
+ j -= 1
97
+ i += 1
98
+
99
+ # The top-level entry points
100
+
101
+ @wrap(no_cpython_wrapper=True)
102
+ def mergesort(arr):
103
+ "Inplace"
104
+ ws = np.empty(arr.size // 2, dtype=arr.dtype)
105
+ argmergesort_inner(arr, None, ws)
106
+ return arr
107
+
108
+
109
+ @wrap(no_cpython_wrapper=True)
110
+ def argmergesort(arr):
111
+ "Out-of-place"
112
+ idxs = np.arange(arr.size)
113
+ ws = np.empty(arr.size // 2, dtype=idxs.dtype)
114
+ argmergesort_inner(idxs, arr, ws)
115
+ return idxs
116
+
117
+ return MergesortImplementation(
118
+ run_mergesort=(argmergesort if is_argsort else mergesort)
119
+ )
120
+
121
+
122
+ def make_jit_mergesort(*args, **kwargs):
123
+ from numba import njit
124
+ # NOTE: wrap with njit to allow recursion
125
+ # because @register_jitable => @overload doesn't support recursion
126
+ return make_mergesort_impl(njit, *args, **kwargs)
venv/lib/python3.10/site-packages/numba/misc/numba_entry.py ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ import argparse
3
+ import os
4
+ import subprocess
5
+ import json
6
+
7
+ from .numba_sysinfo import display_sysinfo, get_sysinfo
8
+ from .numba_gdbinfo import display_gdbinfo
9
+
10
+
11
+ def make_parser():
12
+ parser = argparse.ArgumentParser()
13
+ parser.add_argument('--annotate', help='Annotate source',
14
+ action='store_true')
15
+ parser.add_argument('--dump-llvm', action="store_true",
16
+ help='Print generated llvm assembly')
17
+ parser.add_argument('--dump-optimized', action='store_true',
18
+ help='Dump the optimized llvm assembly')
19
+ parser.add_argument('--dump-assembly', action='store_true',
20
+ help='Dump the LLVM generated assembly')
21
+ parser.add_argument('--annotate-html', nargs=1,
22
+ help='Output source annotation as html')
23
+ parser.add_argument('-s', '--sysinfo', action="store_true",
24
+ help='Output system information for bug reporting')
25
+ parser.add_argument('-g', '--gdbinfo', action="store_true",
26
+ help='Output system information about gdb')
27
+ parser.add_argument('--sys-json', nargs=1,
28
+ help='Saves the system info dict as a json file')
29
+ parser.add_argument('filename', nargs='?', help='Python source filename')
30
+ return parser
31
+
32
+
33
+ def main():
34
+ parser = make_parser()
35
+ args = parser.parse_args()
36
+
37
+ if args.sysinfo:
38
+ print("System info:")
39
+ display_sysinfo()
40
+
41
+ if args.gdbinfo:
42
+ print("GDB info:")
43
+ display_gdbinfo()
44
+
45
+ if args.sysinfo or args.gdbinfo:
46
+ sys.exit(0)
47
+
48
+ if args.sys_json:
49
+ info = get_sysinfo()
50
+ info.update({'Start': info['Start'].isoformat()})
51
+ info.update({'Start UTC': info['Start UTC'].isoformat()})
52
+ with open(args.sys_json[0], 'w') as f:
53
+ json.dump(info, f, indent=4)
54
+ sys.exit(0)
55
+
56
+ os.environ['NUMBA_DUMP_ANNOTATION'] = str(int(args.annotate))
57
+ if args.annotate_html is not None:
58
+ try:
59
+ from jinja2 import Template
60
+ except ImportError:
61
+ raise ImportError("Please install the 'jinja2' package")
62
+ os.environ['NUMBA_DUMP_HTML'] = str(args.annotate_html[0])
63
+ os.environ['NUMBA_DUMP_LLVM'] = str(int(args.dump_llvm))
64
+ os.environ['NUMBA_DUMP_OPTIMIZED'] = str(int(args.dump_optimized))
65
+ os.environ['NUMBA_DUMP_ASSEMBLY'] = str(int(args.dump_assembly))
66
+
67
+ if args.filename:
68
+ cmd = [sys.executable, args.filename]
69
+ subprocess.call(cmd)
70
+ else:
71
+ print("numba: error: the following arguments are required: filename")
72
+ sys.exit(1)
venv/lib/python3.10/site-packages/numba/misc/numba_gdbinfo.py ADDED
@@ -0,0 +1,161 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Module for displaying information about Numba's gdb set up"""
2
+ from collections import namedtuple
3
+ import os
4
+ import re
5
+ import subprocess
6
+ from textwrap import dedent
7
+ from numba import config
8
+
9
+ # Container for the output of the gdb info data collection
10
+ _fields = ('binary_loc, extension_loc, py_ver, np_ver, supported')
11
+ _gdb_info = namedtuple('_gdb_info', _fields)
12
+
13
+
14
+ class _GDBTestWrapper():
15
+ """Wraps the gdb binary and has methods for checking what the gdb binary
16
+ has support for (Python and NumPy)."""
17
+
18
+ def __init__(self,):
19
+ gdb_binary = config.GDB_BINARY
20
+ if gdb_binary is None:
21
+ msg = ("No valid binary could be found for gdb named: "
22
+ f"{config.GDB_BINARY}")
23
+ raise ValueError(msg)
24
+ self._gdb_binary = gdb_binary
25
+
26
+ def _run_cmd(self, cmd=()):
27
+ gdb_call = [self.gdb_binary, '-q',]
28
+ for x in cmd:
29
+ gdb_call.append('-ex')
30
+ gdb_call.append(x)
31
+ gdb_call.extend(['-ex', 'q'])
32
+ return subprocess.run(gdb_call, capture_output=True, timeout=10,
33
+ text=True)
34
+
35
+ @property
36
+ def gdb_binary(self):
37
+ return self._gdb_binary
38
+
39
+ @classmethod
40
+ def success(cls, status):
41
+ return status.returncode == 0
42
+
43
+ def check_launch(self):
44
+ """Checks that gdb will launch ok"""
45
+ return self._run_cmd()
46
+
47
+ def check_python(self):
48
+ cmd = ("python from __future__ import print_function; "
49
+ "import sys; print(sys.version_info[:2])")
50
+ return self._run_cmd((cmd,))
51
+
52
+ def check_numpy(self):
53
+ cmd = ("python from __future__ import print_function; "
54
+ "import types; import numpy; "
55
+ "print(isinstance(numpy, types.ModuleType))")
56
+ return self._run_cmd((cmd,))
57
+
58
+ def check_numpy_version(self):
59
+ cmd = ("python from __future__ import print_function; "
60
+ "import types; import numpy;"
61
+ "print(numpy.__version__)")
62
+ return self._run_cmd((cmd,))
63
+
64
+
65
+ def collect_gdbinfo():
66
+ """Prints information to stdout about the gdb setup that Numba has found"""
67
+
68
+ # State flags:
69
+ gdb_state = None
70
+ gdb_has_python = False
71
+ gdb_has_numpy = False
72
+ gdb_python_version = 'No Python support'
73
+ gdb_python_numpy_version = "No NumPy support"
74
+
75
+ # There are so many ways for gdb to not be working as expected. Surround
76
+ # the "is it working" tests with try/except and if there's an exception
77
+ # store it for processing later.
78
+ try:
79
+ # Check gdb exists
80
+ gdb_wrapper = _GDBTestWrapper()
81
+
82
+ # Check gdb works
83
+ status = gdb_wrapper.check_launch()
84
+ if not gdb_wrapper.success(status):
85
+ msg = (f"gdb at '{gdb_wrapper.gdb_binary}' does not appear to work."
86
+ f"\nstdout: {status.stdout}\nstderr: {status.stderr}")
87
+ raise ValueError(msg)
88
+ gdb_state = gdb_wrapper.gdb_binary
89
+ except Exception as e:
90
+ gdb_state = f"Testing gdb binary failed. Reported Error: {e}"
91
+ else:
92
+ # Got this far, so gdb works, start checking what it supports
93
+ status = gdb_wrapper.check_python()
94
+ if gdb_wrapper.success(status):
95
+ version_match = re.match(r'\((\d+),\s+(\d+)\)',
96
+ status.stdout.strip())
97
+ if version_match is not None:
98
+ pymajor, pyminor = version_match.groups()
99
+ gdb_python_version = f"{pymajor}.{pyminor}"
100
+ gdb_has_python = True
101
+
102
+ status = gdb_wrapper.check_numpy()
103
+ if gdb_wrapper.success(status):
104
+ if "Traceback" not in status.stderr.strip():
105
+ if status.stdout.strip() == 'True':
106
+ gdb_has_numpy = True
107
+ gdb_python_numpy_version = "Unknown"
108
+ # NumPy is present find the version
109
+ status = gdb_wrapper.check_numpy_version()
110
+ if gdb_wrapper.success(status):
111
+ if "Traceback" not in status.stderr.strip():
112
+ gdb_python_numpy_version = \
113
+ status.stdout.strip()
114
+
115
+ # Work out what level of print-extension support is present in this gdb
116
+ if gdb_has_python:
117
+ if gdb_has_numpy:
118
+ print_ext_supported = "Full (Python and NumPy supported)"
119
+ else:
120
+ print_ext_supported = "Partial (Python only, no NumPy support)"
121
+ else:
122
+ print_ext_supported = "None"
123
+
124
+ # Work out print ext location
125
+ print_ext_file = "gdb_print_extension.py"
126
+ print_ext_path = os.path.join(os.path.dirname(__file__), print_ext_file)
127
+
128
+ # return!
129
+ return _gdb_info(gdb_state, print_ext_path, gdb_python_version,
130
+ gdb_python_numpy_version, print_ext_supported)
131
+
132
+
133
+ def display_gdbinfo(sep_pos=45):
134
+ """Displays the information collected by collect_gdbinfo.
135
+ """
136
+ gdb_info = collect_gdbinfo()
137
+ print('-' * 80)
138
+ fmt = f'%-{sep_pos}s : %-s'
139
+ # Display the information
140
+ print(fmt % ("Binary location", gdb_info.binary_loc))
141
+ print(fmt % ("Print extension location", gdb_info.extension_loc))
142
+ print(fmt % ("Python version", gdb_info.py_ver))
143
+ print(fmt % ("NumPy version", gdb_info.np_ver))
144
+ print(fmt % ("Numba printing extension support", gdb_info.supported))
145
+
146
+ print("")
147
+ print("To load the Numba gdb printing extension, execute the following "
148
+ "from the gdb prompt:")
149
+ print(f"\nsource {gdb_info.extension_loc}\n")
150
+ print('-' * 80)
151
+ warn = """
152
+ =============================================================
153
+ IMPORTANT: Before sharing you should remove any information
154
+ in the above that you wish to keep private e.g. paths.
155
+ =============================================================
156
+ """
157
+ print(dedent(warn))
158
+
159
+
160
+ if __name__ == '__main__':
161
+ display_gdbinfo()
venv/lib/python3.10/site-packages/numba/misc/numba_sysinfo.py ADDED
@@ -0,0 +1,698 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import locale
3
+ import multiprocessing
4
+ import os
5
+ import platform
6
+ import textwrap
7
+ import sys
8
+ from contextlib import redirect_stdout
9
+ from datetime import datetime
10
+ from io import StringIO
11
+ from subprocess import check_output, PIPE, CalledProcessError
12
+ import numpy as np
13
+ import llvmlite.binding as llvmbind
14
+ from llvmlite import __version__ as llvmlite_version
15
+ from numba import cuda as cu, __version__ as version_number
16
+ from numba.cuda import cudadrv
17
+ from numba.cuda.cudadrv.driver import driver as cudriver
18
+ from numba.cuda.cudadrv.runtime import runtime as curuntime
19
+ from numba.core import config
20
+
21
# Optional dependency: psutil gives more accurate memory/CPU-affinity info.
# The flag records whether it imported so later code can branch on it.
_psutil_import = False
try:
    import psutil
except ImportError:
    pass
else:
    _psutil_import = True

__all__ = ['get_sysinfo', 'display_sysinfo']

# Keys of a `sysinfo` dictionary

# Time info
_start, _start_utc, _runtime = 'Start', 'Start UTC', 'Runtime'
_numba_version = 'Numba Version'
# Hardware info
_machine = 'Machine'
_cpu_name, _cpu_count = 'CPU Name', 'CPU Count'
_cpus_allowed, _cpus_list = 'CPUs Allowed', 'List CPUs Allowed'
_cpu_features = 'CPU Features'
# NOTE: the trailing comma is harmless -- the RHS is still a 2-tuple.
_cfs_quota, _cfs_period = 'CFS Quota', 'CFS Period',
_cfs_restrict = 'CFS Restriction'
_mem_total, _mem_available = 'Mem Total', 'Mem Available'
# OS info
_platform_name, _platform_release = 'Platform Name', 'Platform Release'
_os_name, _os_version = 'OS Name', 'OS Version'
_os_spec_version = 'OS Specific Version'
_libc_version = 'Libc Version'
# Python info
_python_comp = 'Python Compiler'
_python_impl = 'Python Implementation'
_python_version = 'Python Version'
_python_locale = 'Python Locale'
# LLVM info
_llvmlite_version = 'llvmlite Version'
_llvm_version = 'LLVM Version'
# CUDA info
_cu_target_impl = 'CUDA Target Impl'
_cu_dev_init = 'CUDA Device Init'
_cu_drv_ver = 'CUDA Driver Version'
_cu_rt_ver = 'CUDA Runtime Version'
_cu_nvidia_bindings = 'NVIDIA CUDA Bindings'
_cu_nvidia_bindings_used = 'NVIDIA CUDA Bindings In Use'
_cu_detect_out, _cu_lib_test = 'CUDA Detect Output', 'CUDA Lib Test'
_cu_mvc_available = 'NVIDIA CUDA Minor Version Compatibility Available'
_cu_mvc_needed = 'NVIDIA CUDA Minor Version Compatibility Needed'
_cu_mvc_in_use = 'NVIDIA CUDA Minor Version Compatibility In Use'
# NumPy info
_numpy_version = 'NumPy Version'
_numpy_supported_simd_features = 'NumPy Supported SIMD features'
_numpy_supported_simd_dispatch = 'NumPy Supported SIMD dispatch'
_numpy_supported_simd_baseline = 'NumPy Supported SIMD baseline'
_numpy_AVX512_SKX_detected = 'NumPy AVX512_SKX detected'
# SVML info
_svml_state, _svml_loaded = 'SVML State', 'SVML Lib Loaded'
_llvm_svml_patched = 'LLVM SVML Patched'
_svml_operational = 'SVML Operational'
# Threading layer info
_tbb_thread, _tbb_error = 'TBB Threading', 'TBB Threading Error'
_openmp_thread, _openmp_error = 'OpenMP Threading', 'OpenMP Threading Error'
_openmp_vendor = 'OpenMP vendor'
_wkq_thread, _wkq_error = 'Workqueue Threading', 'Workqueue Threading Error'
# Numba info
_numba_env_vars = 'Numba Env Vars'
# Conda info
_conda_build_ver, _conda_env_ver = 'Conda Build', 'Conda Env'
_conda_platform, _conda_python_ver = 'Conda Platform', 'Conda Python Version'
_conda_root_writable = 'Conda Root Writable'
# Packages info
_inst_pkg = 'Installed Packages'
# Psutil info
_psutil = 'Psutil Available'
# Errors and warnings
_errors = 'Errors'
_warnings = 'Warnings'

# Error and warning log.  Module-level mutable lists: appended to by
# get_os_spec_info()/get_sysinfo() and surfaced in the final report.
_error_log = []
_warning_log = []
100
+
101
+
102
def get_os_spec_info(os_name):
    """Gather OS-specific details (memory, CPU affinity, CFS limits).

    Parameters
    ----------
    os_name : str
        Output of ``platform.system()``: 'Linux', 'Windows' or 'Darwin'.
        Unknown names yield only the generic information.

    Returns
    -------
    dict
        Mapping of sysinfo keys (e.g. ``_mem_total``) to values.  Failures
        are appended to the module-level ``_error_log``/``_warning_log``
        rather than raised.
    """
    # Linux man page for `/proc`:
    # http://man7.org/linux/man-pages/man5/proc.5.html

    # Windows documentation for `wmic OS`:
    # https://docs.microsoft.com/en-us/windows/win32/cimwin32prov/cim-operatingsystem

    # MacOS man page for `sysctl`:
    # https://www.unix.com/man-page/osx/3/sysctl/
    # MacOS man page for `vm_stat`:
    # https://www.unix.com/man-page/osx/1/vm_stat/

    # Marker subclasses: the flags tell the command loop below how to treat
    # each entry (buffer multi-line output vs. read a file in-process).
    class CmdBufferOut(tuple):
        buffer_output_flag = True

    class CmdReadFile(tuple):
        read_file_flag = True

    shell_params = {
        'Linux': {
            'cmd': (
                CmdReadFile(('/sys/fs/cgroup/cpuacct/cpu.cfs_quota_us',)),
                CmdReadFile(('/sys/fs/cgroup/cpuacct/cpu.cfs_period_us',)),
            ),
            'cmd_optional': (
                CmdReadFile(('/proc/meminfo',)),
                CmdReadFile(('/proc/self/status',)),
            ),
            'kwds': {
                # output string fragment -> result dict key
                'MemTotal:': _mem_total,
                'MemAvailable:': _mem_available,
                'Cpus_allowed:': _cpus_allowed,
                'Cpus_allowed_list:': _cpus_list,
                '/sys/fs/cgroup/cpuacct/cpu.cfs_quota_us': _cfs_quota,
                '/sys/fs/cgroup/cpuacct/cpu.cfs_period_us': _cfs_period,
            },
        },
        'Windows': {
            'cmd': (),
            'cmd_optional': (
                CmdBufferOut(('wmic', 'OS', 'get', 'TotalVirtualMemorySize')),
                CmdBufferOut(('wmic', 'OS', 'get', 'FreeVirtualMemory')),
            ),
            'kwds': {
                # output string fragment -> result dict key
                'TotalVirtualMemorySize': _mem_total,
                'FreeVirtualMemory': _mem_available,
            },
        },
        'Darwin': {
            'cmd': (),
            'cmd_optional': (
                ('sysctl', 'hw.memsize'),
                # NOTE(review): `('vm_stat')` is a bare string, not a tuple;
                # check_output accepts a string command so this still runs --
                # confirm intent vs. `('vm_stat',)`.
                ('vm_stat'),
            ),
            'kwds': {
                # output string fragment -> result dict key
                'hw.memsize:': _mem_total,
                'free:': _mem_available,
            },
            'units': {
                _mem_total: 1,  # Size is given in bytes.
                _mem_available: 4096,  # Size is given in 4kB pages.
            },
        },
    }

    os_spec_info = {}
    params = shell_params.get(os_name, {})
    cmd_selected = params.get('cmd', ())

    if _psutil_import:
        # psutil path: accurate memory and CPU-affinity data without
        # spawning subprocesses.
        vm = psutil.virtual_memory()
        os_spec_info.update({
            _mem_total: vm.total,
            _mem_available: vm.available,
        })
        p = psutil.Process()
        cpus_allowed = p.cpu_affinity() if hasattr(p, 'cpu_affinity') else []
        if cpus_allowed:
            os_spec_info[_cpus_allowed] = len(cpus_allowed)
            os_spec_info[_cpus_list] = ' '.join(str(n) for n in cpus_allowed)

    else:
        _warning_log.append(
            "Warning (psutil): psutil cannot be imported. "
            "For more accuracy, consider installing it.")
        # Fallback to internal heuristics
        cmd_selected += params.get('cmd_optional', ())

    # Assuming the shell cmd returns a unique (k, v) pair per line
    # or a unique (k, v) pair spread over several lines:
    # Gather output in a list of strings containing a keyword and some value.
    output = []
    for cmd in cmd_selected:
        if hasattr(cmd, 'read_file_flag'):
            # Open file within Python
            if os.path.exists(cmd[0]):
                try:
                    with open(cmd[0], 'r') as f:
                        out = f.readlines()
                    if out:
                        # Prefix the first line with the file path so the
                        # keyword matching below can key on the path itself.
                        out[0] = ' '.join((cmd[0], out[0]))
                        output.extend(out)
                except OSError as e:
                    _error_log.append(f'Error (file read): {e}')
                    continue
            else:
                _warning_log.append('Warning (no file): {}'.format(cmd[0]))
                continue
        else:
            # Spawn a subprocess
            try:
                out = check_output(cmd, stderr=PIPE)
            except (OSError, CalledProcessError) as e:
                _error_log.append(f'Error (subprocess): {e}')
                continue
            if hasattr(cmd, 'buffer_output_flag'):
                # Collapse multi-line output onto one line so the header
                # keyword and its value end up in the same string.
                out = b' '.join(line for line in out.splitlines()) + b'\n'
            output.extend(out.decode().splitlines())

    # Extract (k, output) pairs by searching for keywords in output
    kwds = params.get('kwds', {})
    for line in output:
        match = kwds.keys() & line.split()
        if match and len(match) == 1:
            k = kwds[match.pop()]
            os_spec_info[k] = line
        elif len(match) > 1:
            print(f'Ambiguous output: {line}')

    # Try to extract something meaningful from output string
    def format():
        # CFS restrictions: a quota of -1 means "no restriction".
        split = os_spec_info.get(_cfs_quota, '').split()
        if split:
            os_spec_info[_cfs_quota] = float(split[-1])
        split = os_spec_info.get(_cfs_period, '').split()
        if split:
            os_spec_info[_cfs_period] = float(split[-1])
        if os_spec_info.get(_cfs_quota, -1) != -1:
            cfs_quota = os_spec_info.get(_cfs_quota, '')
            cfs_period = os_spec_info.get(_cfs_period, '')
            # CPUs worth of runtime granted by the scheduler.
            runtime_amount = cfs_quota / cfs_period
            os_spec_info[_cfs_restrict] = runtime_amount

    def format_optional():
        # Memory: normalise raw text values to bytes using per-OS units
        # (default unit is kB, as reported by /proc/meminfo).
        units = {_mem_total: 1024, _mem_available: 1024}
        units.update(params.get('units', {}))
        for k in (_mem_total, _mem_available):
            digits = ''.join(d for d in os_spec_info.get(k, '') if d.isdigit())
            os_spec_info[k] = int(digits or 0) * units[k]
        # Accessible CPUs: Cpus_allowed is a hex affinity mask; the popcount
        # of its last chunk gives the number of usable CPUs.
        split = os_spec_info.get(_cpus_allowed, '').split()
        if split:
            n = split[-1]
            n = n.split(',')[-1]
            os_spec_info[_cpus_allowed] = str(bin(int(n or 0, 16))).count('1')
        split = os_spec_info.get(_cpus_list, '').split()
        if split:
            os_spec_info[_cpus_list] = split[-1]

    try:
        format()
        if not _psutil_import:
            format_optional()
    except Exception as e:
        _error_log.append(f'Error (format shell output): {e}')

    # Call OS specific functions
    os_specific_funcs = {
        'Linux': {
            _libc_version: lambda: ' '.join(platform.libc_ver())
        },
        'Windows': {
            _os_spec_version: lambda: ' '.join(
                s for s in platform.win32_ver()),
        },
        'Darwin': {
            _os_spec_version: lambda: ''.join(
                i or ' ' for s in tuple(platform.mac_ver()) for i in s),
        },
    }
    key_func = os_specific_funcs.get(os_name, {})
    os_spec_info.update({k: f() for k, f in key_func.items()})
    return os_spec_info
290
+
291
+
292
def get_sysinfo():
    """Collect a full system report as a dictionary.

    Probes hardware, OS, Python, LLVM/llvmlite, CUDA, NumPy SIMD, SVML,
    threading backends and package-manager state.  Every probe is wrapped
    so a failure is logged (module-level ``_error_log``/``_warning_log``,
    also included in the result) instead of raised.

    Returns
    -------
    dict
        Mapping of the module's sysinfo keys to the gathered values.
    """
    # Gather the information that shouldn't raise exceptions
    sys_info = {
        _start: datetime.now(),
        _start_utc: datetime.utcnow(),
        _machine: platform.machine(),
        _cpu_name: llvmbind.get_host_cpu_name(),
        _cpu_count: multiprocessing.cpu_count(),
        _platform_name: platform.platform(aliased=True),
        _platform_release: platform.release(),
        _os_name: platform.system(),
        _os_version: platform.version(),
        _python_comp: platform.python_compiler(),
        _python_impl: platform.python_implementation(),
        _python_version: platform.python_version(),
        _numba_env_vars: {k: v for (k, v) in os.environ.items()
                          if k.startswith('NUMBA_')},
        _numba_version: version_number,
        _llvm_version: '.'.join(str(i) for i in llvmbind.llvm_version_info),
        _llvmlite_version: llvmlite_version,
        _psutil: _psutil_import,
    }

    # CPU features
    try:
        feature_map = llvmbind.get_host_cpu_features()
    except RuntimeError as e:
        _error_log.append(f'Error (CPU features): {e}')
    else:
        features = sorted([key for key, value in feature_map.items() if value])
        sys_info[_cpu_features] = ' '.join(features)

    # Python locale
    # On MacOSX, getdefaultlocale can raise. Check again if Py > 3.7.5
    try:
        # If $LANG is unset, getdefaultlocale() can return (None, None), make
        # sure we can encode this as strings by casting explicitly.
        sys_info[_python_locale] = '.'.join([str(i) for i in
                                             locale.getdefaultlocale()])
    except Exception as e:
        _error_log.append(f'Error (locale): {e}')

    # CUDA information
    try:
        sys_info[_cu_target_impl] = cu.implementation
    except AttributeError:
        # On the offchance an out-of-tree target did not set the
        # implementation, we can try to continue
        pass

    try:
        cu.list_devices()[0]  # will a device initialise?
    except Exception as e:
        sys_info[_cu_dev_init] = False
        msg_not_found = "CUDA driver library cannot be found"
        msg_disabled_by_user = "CUDA is disabled"
        msg_end = " or no CUDA enabled devices are present."
        msg_generic_problem = "CUDA device initialisation problem."
        msg = getattr(e, 'msg', None)
        if msg is not None:
            if msg_not_found in msg:
                err_msg = msg_not_found + msg_end
            elif msg_disabled_by_user in msg:
                err_msg = msg_disabled_by_user + msg_end
            else:
                err_msg = msg_generic_problem + " Message:" + msg
        else:
            err_msg = msg_generic_problem + " " + str(e)
        # Best effort error report
        _warning_log.append("Warning (cuda): %s\nException class: %s" %
                            (err_msg, str(type(e))))
    else:
        try:
            sys_info[_cu_dev_init] = True

            # Capture the textual device-detection report.
            output = StringIO()
            with redirect_stdout(output):
                cu.detect()
            sys_info[_cu_detect_out] = output.getvalue()
            output.close()

            cu_drv_ver = cudriver.get_version()
            cu_rt_ver = curuntime.get_version()
            sys_info[_cu_drv_ver] = '%s.%s' % cu_drv_ver
            sys_info[_cu_rt_ver] = '%s.%s' % cu_rt_ver

            # Capture the CUDA library-lookup self-test report.
            output = StringIO()
            with redirect_stdout(output):
                cudadrv.libs.test()
            sys_info[_cu_lib_test] = output.getvalue()
            output.close()

            try:
                from cuda import cuda  # noqa: F401
                nvidia_bindings_available = True
            except ImportError:
                nvidia_bindings_available = False
            sys_info[_cu_nvidia_bindings] = nvidia_bindings_available

            nv_binding_used = bool(cudadrv.driver.USE_NV_BINDING)
            sys_info[_cu_nvidia_bindings_used] = nv_binding_used

            try:
                from ptxcompiler import compile_ptx  # noqa: F401
                from cubinlinker import CubinLinker  # noqa: F401
                sys_info[_cu_mvc_available] = True
            except ImportError:
                sys_info[_cu_mvc_available] = False

            # MVC is needed when the runtime is newer than the driver.
            sys_info[_cu_mvc_needed] = cu_rt_ver > cu_drv_ver
            sys_info[_cu_mvc_in_use] = bool(
                config.CUDA_ENABLE_MINOR_VERSION_COMPATIBILITY)
        except Exception as e:
            _warning_log.append(
                "Warning (cuda): Probing CUDA failed "
                "(device and driver present, runtime problem?)\n"
                f"(cuda) {type(e)}: {e}")

    # NumPy information
    sys_info[_numpy_version] = np.version.full_version
    try:
        # NOTE: These consts were added in NumPy 1.20
        from numpy.core._multiarray_umath import (__cpu_features__,
                                                  __cpu_dispatch__,
                                                  __cpu_baseline__,)
    except ImportError:
        sys_info[_numpy_AVX512_SKX_detected] = False
    else:
        feat_filtered = [k for k, v in __cpu_features__.items() if v]
        sys_info[_numpy_supported_simd_features] = feat_filtered
        sys_info[_numpy_supported_simd_dispatch] = __cpu_dispatch__
        sys_info[_numpy_supported_simd_baseline] = __cpu_baseline__
        sys_info[_numpy_AVX512_SKX_detected] = \
            __cpu_features__.get("AVX512_SKX", False)

    # SVML information
    # Replicate some SVML detection logic from numba.__init__ here.
    # If SVML load fails in numba.__init__ the splitting of the logic
    # here will help diagnosing the underlying issue.
    svml_lib_loaded = True
    try:
        if sys.platform.startswith('linux'):
            llvmbind.load_library_permanently("libsvml.so")
        elif sys.platform.startswith('darwin'):
            llvmbind.load_library_permanently("libsvml.dylib")
        elif sys.platform.startswith('win'):
            llvmbind.load_library_permanently("svml_dispmd")
        else:
            svml_lib_loaded = False
    except Exception:
        svml_lib_loaded = False
    func = getattr(llvmbind.targets, "has_svml", None)
    sys_info[_llvm_svml_patched] = func() if func else False
    sys_info[_svml_state] = config.USING_SVML
    sys_info[_svml_loaded] = svml_lib_loaded
    # SVML only works when the config enables it, the library loaded and
    # llvmlite was built against an SVML-patched LLVM.
    sys_info[_svml_operational] = all((
        sys_info[_svml_state],
        sys_info[_svml_loaded],
        sys_info[_llvm_svml_patched],
    ))

    # Check which threading backends are available.
    def parse_error(e, backend):
        # parses a linux based error message, this is to provide feedback
        # and hide user paths etc
        try:
            path, problem, symbol = [x.strip() for x in e.msg.split(':')]
            extn_dso = os.path.split(path)[1]
            if backend in extn_dso:
                return "%s: %s" % (problem, symbol)
        except Exception:
            pass
        return "Unknown import problem."

    try:
        # check import is ok, this means the DSO linkage is working
        from numba.np.ufunc import tbbpool  # NOQA
        # check that the version is compatible, this is a check performed at
        # runtime (well, compile time), it will also ImportError if there's
        # a problem.
        from numba.np.ufunc.parallel import _check_tbb_version_compatible
        _check_tbb_version_compatible()
        sys_info[_tbb_thread] = True
    except ImportError as e:
        # might be a missing symbol due to e.g. tbb libraries missing
        sys_info[_tbb_thread] = False
        sys_info[_tbb_error] = parse_error(e, 'tbbpool')

    try:
        from numba.np.ufunc import omppool
        sys_info[_openmp_thread] = True
        sys_info[_openmp_vendor] = omppool.openmp_vendor
    except ImportError as e:
        sys_info[_openmp_thread] = False
        sys_info[_openmp_error] = parse_error(e, 'omppool')

    try:
        from numba.np.ufunc import workqueue  # NOQA
        sys_info[_wkq_thread] = True
    except ImportError as e:
        # BUGFIX: was `True`; a failed import means the workqueue backend is
        # NOT available (consistent with the TBB and OpenMP probes above and
        # with the error-message branch in display_sysinfo).
        sys_info[_wkq_thread] = False
        sys_info[_wkq_error] = parse_error(e, 'workqueue')

    # Look for conda and installed packages information
    cmd = ('conda', 'info', '--json')
    try:
        conda_out = check_output(cmd)
    except Exception as e:
        _warning_log.append(f'Warning: Conda not available.\n Error was {e}\n')
        # Conda is not available, try pip list to list installed packages
        cmd = (sys.executable, '-m', 'pip', 'list')
        try:
            reqs = check_output(cmd)
        except Exception as e:
            _error_log.append(f'Error (pip): {e}')
        else:
            sys_info[_inst_pkg] = reqs.decode().splitlines()

    else:
        jsond = json.loads(conda_out.decode())
        keys = {
            'conda_build_version': _conda_build_ver,
            'conda_env_version': _conda_env_ver,
            'platform': _conda_platform,
            'python_version': _conda_python_ver,
            'root_writable': _conda_root_writable,
        }
        for conda_k, sysinfo_k in keys.items():
            sys_info[sysinfo_k] = jsond.get(conda_k, 'N/A')

        # Get info about packages in current environment
        cmd = ('conda', 'list')
        try:
            conda_out = check_output(cmd)
        except CalledProcessError as e:
            _error_log.append(f'Error (conda): {e}')
        else:
            data = conda_out.decode().splitlines()
            sys_info[_inst_pkg] = [l for l in data if not l.startswith('#')]

    sys_info.update(get_os_spec_info(sys_info[_os_name]))
    sys_info[_errors] = _error_log
    sys_info[_warnings] = _warning_log
    sys_info[_runtime] = (datetime.now() - sys_info[_start]).total_seconds()
    return sys_info
538
+
539
+
540
def display_sysinfo(info=None, sep_pos=45):
    """Pretty-print a system information report to stdout.

    Parameters
    ----------
    info : dict, optional
        A sysinfo dict as returned by :func:`get_sysinfo`.  When ``None``
        a fresh report is gathered.
    sep_pos : int, optional
        Column at which the ``key : value`` separator is aligned.
    """
    # Marker subclasses drive the rendering loop at the bottom: each entry
    # of `template` is printed according to which flag (if any) it carries.
    class DisplayMap(dict):
        display_map_flag = True

    class DisplaySeq(tuple):
        display_seq_flag = True

    class DisplaySeqMaps(tuple):
        display_seqmaps_flag = True

    if info is None:
        info = get_sysinfo()

    fmt = f'%-{sep_pos}s : %-s'
    MB = 1024**2
    # Each template entry is either a 2-tuple (key, value) rendered with
    # `fmt`, a 1-tuple printed verbatim, or a Display* container.
    template = (
        ("-" * 80,),
        ("__Time Stamp__",),
        ("Report started (local time)", info.get(_start, '?')),
        ("UTC start time", info.get(_start_utc, '?')),
        ("Running time (s)", info.get(_runtime, '?')),
        ("",),
        ("__Hardware Information__",),
        ("Machine", info.get(_machine, '?')),
        ("CPU Name", info.get(_cpu_name, '?')),
        ("CPU Count", info.get(_cpu_count, '?')),
        ("Number of accessible CPUs", info.get(_cpus_allowed, '?')),
        ("List of accessible CPUs cores", info.get(_cpus_list, '?')),
        ("CFS Restrictions (CPUs worth of runtime)",
         info.get(_cfs_restrict, 'None')),
        ("",),
        # Wrap the (long) feature string and indent continuation lines to
        # line up with the value column.
        ("CPU Features", '\n'.join(
            ' ' * (sep_pos + 3) + l if i else l
            for i, l in enumerate(
                textwrap.wrap(
                    info.get(_cpu_features, '?'),
                    width=79 - sep_pos
                )
            )
        )),
        ("",),
        ("Memory Total (MB)", info.get(_mem_total, 0) // MB or '?'),
        # On Darwin without psutil, vm_stat reports "free" not "available".
        ("Memory Available (MB)"
         if info.get(_os_name, '') != 'Darwin' or info.get(_psutil, False)
         else "Free Memory (MB)", info.get(_mem_available, 0) // MB or '?'),
        ("",),
        ("__OS Information__",),
        ("Platform Name", info.get(_platform_name, '?')),
        ("Platform Release", info.get(_platform_release, '?')),
        ("OS Name", info.get(_os_name, '?')),
        ("OS Version", info.get(_os_version, '?')),
        ("OS Specific Version", info.get(_os_spec_version, '?')),
        ("Libc Version", info.get(_libc_version, '?')),
        ("",),
        ("__Python Information__",),
        DisplayMap({k: v for k, v in info.items() if k.startswith('Python')}),
        ("",),
        ("__Numba Toolchain Versions__",),
        ("Numba Version", info.get(_numba_version, '?')),
        ("llvmlite Version", info.get(_llvmlite_version, '?')),
        ("",),
        ("__LLVM Information__",),
        ("LLVM Version", info.get(_llvm_version, '?')),
        ("",),
        ("__CUDA Information__",),
        ("CUDA Target Implementation", info.get(_cu_target_impl, '?')),
        ("CUDA Device Initialized", info.get(_cu_dev_init, '?')),
        ("CUDA Driver Version", info.get(_cu_drv_ver, '?')),
        ("CUDA Runtime Version", info.get(_cu_rt_ver, '?')),
        ("CUDA NVIDIA Bindings Available", info.get(_cu_nvidia_bindings, '?')),
        ("CUDA NVIDIA Bindings In Use",
         info.get(_cu_nvidia_bindings_used, '?')),
        ("CUDA Minor Version Compatibility Available",
         info.get(_cu_mvc_available, '?')),
        ("CUDA Minor Version Compatibility Needed",
         info.get(_cu_mvc_needed, '?')),
        ("CUDA Minor Version Compatibility In Use",
         info.get(_cu_mvc_in_use, '?')),
        ("CUDA Detect Output:",),
        (info.get(_cu_detect_out, "None"),),
        ("CUDA Libraries Test Output:",),
        (info.get(_cu_lib_test, "None"),),
        ("",),
        ("__NumPy Information__",),
        ("NumPy Version", info.get(_numpy_version, '?')),
        ("NumPy Supported SIMD features",
         DisplaySeq(info.get(_numpy_supported_simd_features, [])
                    or ('None found.',))),
        ("NumPy Supported SIMD dispatch",
         DisplaySeq(info.get(_numpy_supported_simd_dispatch, [])
                    or ('None found.',))),
        ("NumPy Supported SIMD baseline",
         DisplaySeq(info.get(_numpy_supported_simd_baseline, [])
                    or ('None found.',))),
        ("NumPy AVX512_SKX support detected",
         info.get(_numpy_AVX512_SKX_detected, '?')),
        ("",),
        ("__SVML Information__",),
        ("SVML State, config.USING_SVML", info.get(_svml_state, '?')),
        ("SVML Library Loaded", info.get(_svml_loaded, '?')),
        ("llvmlite Using SVML Patched LLVM", info.get(_llvm_svml_patched, '?')),
        ("SVML Operational", info.get(_svml_operational, '?')),
        ("",),
        ("__Threading Layer Information__",),
        ("TBB Threading Layer Available", info.get(_tbb_thread, '?')),
        ("+-->TBB imported successfully." if info.get(_tbb_thread, '?')
         else f"+--> Disabled due to {info.get(_tbb_error, '?')}",),
        ("OpenMP Threading Layer Available", info.get(_openmp_thread, '?')),
        (f"+-->Vendor: {info.get(_openmp_vendor, '?')}"
         if info.get(_openmp_thread, False)
         else f"+--> Disabled due to {info.get(_openmp_error, '?')}",),
        ("Workqueue Threading Layer Available", info.get(_wkq_thread, '?')),
        ("+-->Workqueue imported successfully." if info.get(_wkq_thread, False)
         else f"+--> Disabled due to {info.get(_wkq_error, '?')}",),
        ("",),
        ("__Numba Environment Variable Information__",),
        (DisplayMap(info.get(_numba_env_vars, {})) or ('None found.',)),
        ("",),
        ("__Conda Information__",),
        (DisplayMap({k: v for k, v in info.items()
                     if k.startswith('Conda')}) or ("Conda not available.",)),
        ("",),
        ("__Installed Packages__",),
        DisplaySeq(info.get(_inst_pkg, ("Couldn't retrieve packages info.",))),
        ("",),
        ("__Error log__" if info.get(_errors, [])
         else "No errors reported.",),
        DisplaySeq(info.get(_errors, [])),
        ("",),
        ("__Warning log__" if info.get(_warnings, [])
         else "No warnings reported.",),
        DisplaySeq(info.get(_warnings, [])),
        ("-" * 80,),
        ("If requested, please copy and paste the information between\n"
         "the dashed (----) lines, or from a given specific section as\n"
         "appropriate.\n\n"
         "=============================================================\n"
         "IMPORTANT: Please ensure that you are happy with sharing the\n"
         "contents of the information present, any information that you\n"
         "wish to keep private you should remove before sharing.\n"
         "=============================================================\n",),
    )
    # Render: dispatch on the marker flag attached to each entry.
    for t in template:
        if hasattr(t, 'display_seq_flag'):
            print(*t, sep='\n')
        elif hasattr(t, 'display_map_flag'):
            print(*tuple(fmt % (k, v) for (k, v) in t.items()), sep='\n')
        elif hasattr(t, 'display_seqmaps_flag'):
            for d in t:
                print(*tuple(fmt % ('\t' + k, v) for (k, v) in d.items()),
                      sep='\n', end='\n')
        elif len(t) == 2:
            print(fmt % t)
        else:
            print(*t)
695
+
696
+
697
if __name__ == '__main__':
    # Allow running this module directly to print the full system report.
    display_sysinfo()
venv/lib/python3.10/site-packages/numba/misc/quicksort.py ADDED
@@ -0,0 +1,261 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import collections
2
+
3
+ import numpy as np
4
+
5
+ from numba.core import types, config
6
+
7
+
8
# Bundle of the quicksort entry points produced by make_quicksort_impl();
# exposing the subroutines lets tests exercise them individually.
QuicksortImplementation = collections.namedtuple(
    'QuicksortImplementation',
    (# The compile function itself
     'compile',
     # All subroutines exercised by test_sort
     'partition', 'partition3', 'insertion_sort',
     # The top-level function
     'run_quicksort',
     ))


# A (start, stop) work item for the explicit partition stack.
Partition = collections.namedtuple('Partition', ('start', 'stop'))

# Under this size, switch to a simple insertion sort
SMALL_QUICKSORT = 15

# Maximum depth of the explicit partition stack; ample since only the
# larger partition is pushed (depth is O(log n)).
MAX_STACK = 100
25
+
26
+
27
def make_quicksort_impl(wrap, lt=None, is_argsort=False, is_list=False, is_np_array=False):
    """Build a quicksort implementation parameterised on a wrapper.

    Parameters
    ----------
    wrap : callable
        Decorator applied to every subroutine (identity for pure Python,
        a jit-registering decorator for compiled use).
    lt : callable, optional
        Two-argument "less than" comparator; defaults to ``a < b``.
    is_argsort : bool
        When True, sort an index array R instead of A itself.
    is_list : bool
        Argsort only: build the index container as a list, else an ndarray.
    is_np_array : bool
        When True, the top-level sorter handles n-dimensional arrays by
        sorting along the last axis.

    Returns
    -------
    QuicksortImplementation
        Namedtuple exposing the wrapper and the individual subroutines.
    """
    # Index type for stack entries; depends on the active type system.
    if config.USE_LEGACY_TYPE_SYSTEM:
        intp = types.intp
    else:
        intp = types.py_int
    zero = intp(0)

    # Two subroutines to make the core algorithm generic wrt. argsort
    # or normal sorting. Note the genericity may make basic sort()
    # slightly slower (~5%)
    if is_argsort:
        if is_list:
            @wrap
            def make_res(A):
                return [x for x in range(len(A))]
        else:
            @wrap
            def make_res(A):
                return np.arange(A.size)

        @wrap
        def GET(A, idx_or_val):
            return A[idx_or_val]

    else:
        @wrap
        def make_res(A):
            return A

        @wrap
        def GET(A, idx_or_val):
            return idx_or_val

    def default_lt(a, b):
        """
        Trivial comparison function between two keys.
        """
        return a < b

    LT = wrap(lt if lt is not None else default_lt)

    @wrap
    def insertion_sort(A, R, low, high):
        """
        Insertion sort A[low:high + 1]. Note the inclusive bounds.
        """
        assert low >= 0
        if high <= low:
            return

        for i in range(low + 1, high + 1):
            k = R[i]
            v = GET(A, k)
            # Insert v into A[low:i]
            j = i
            while j > low and LT(v, GET(A, R[j - 1])):
                # Make place for moving A[i] downwards
                R[j] = R[j - 1]
                j -= 1
            R[j] = k

    @wrap
    def partition(A, R, low, high):
        """
        Partition A[low:high + 1] around a chosen pivot.  The pivot's index
        is returned.
        """
        assert low >= 0
        assert high > low

        mid = (low + high) >> 1
        # NOTE: the pattern of swaps below for the pivot choice and the
        # partitioning gives good results (i.e. regular O(n log n))
        # on sorted, reverse-sorted, and uniform arrays.  Subtle changes
        # risk breaking this property.

        # median of three {low, middle, high}
        if LT(GET(A, R[mid]), GET(A, R[low])):
            R[low], R[mid] = R[mid], R[low]
        if LT(GET(A, R[high]), GET(A, R[mid])):
            R[high], R[mid] = R[mid], R[high]
        if LT(GET(A, R[mid]), GET(A, R[low])):
            R[low], R[mid] = R[mid], R[low]
        pivot = GET(A, R[mid])

        # Temporarily stash the pivot at the end
        R[high], R[mid] = R[mid], R[high]
        i = low
        j = high - 1
        while True:
            while i < high and LT(GET(A, R[i]), pivot):
                i += 1
            while j >= low and LT(pivot, GET(A, R[j])):
                j -= 1
            if i >= j:
                break
            R[i], R[j] = R[j], R[i]
            i += 1
            j -= 1
        # Put the pivot back in its final place (all items before `i`
        # are smaller than the pivot, all items at/after `i` are larger)
        R[i], R[high] = R[high], R[i]
        return i

    @wrap
    def partition3(A, low, high):
        """
        Three-way partition [low, high) around a chosen pivot.
        A tuple (lt, gt) is returned such that:
            - all elements in [low, lt) are < pivot
            - all elements in [lt, gt] are == pivot
            - all elements in (gt, high] are > pivot
        """
        mid = (low + high) >> 1
        # median of three {low, middle, high}
        if LT(A[mid], A[low]):
            A[low], A[mid] = A[mid], A[low]
        if LT(A[high], A[mid]):
            A[high], A[mid] = A[mid], A[high]
        if LT(A[mid], A[low]):
            A[low], A[mid] = A[mid], A[low]
        pivot = A[mid]

        A[low], A[mid] = A[mid], A[low]
        lt = low
        gt = high
        i = low + 1
        while i <= gt:
            if LT(A[i], pivot):
                A[lt], A[i] = A[i], A[lt]
                lt += 1
                i += 1
            elif LT(pivot, A[i]):
                A[gt], A[i] = A[i], A[gt]
                gt -= 1
            else:
                i += 1
        return lt, gt

    @wrap
    def run_quicksort1(A):
        # Iterative quicksort of a 1-d sequence using an explicit stack of
        # pending partitions; small ranges fall back to insertion sort.
        R = make_res(A)

        if len(A) < 2:
            return R

        stack = [Partition(zero, zero)] * MAX_STACK
        stack[0] = Partition(zero, len(A) - 1)
        n = 1

        while n > 0:
            n -= 1
            low, high = stack[n]
            # Partition until it becomes more efficient to do an insertion sort
            while high - low >= SMALL_QUICKSORT:
                assert n < MAX_STACK
                i = partition(A, R, low, high)
                # Push largest partition on the stack
                if high - i > i - low:
                    # Right is larger
                    if high > i:
                        stack[n] = Partition(i + 1, high)
                        n += 1
                    high = i - 1
                else:
                    if i > low:
                        stack[n] = Partition(low, i - 1)
                        n += 1
                    low = i + 1

            insertion_sort(A, R, low, high)

        return R

    if is_np_array:
        @wrap
        def run_quicksort(A):
            # n-d arrays are sorted independently along the last axis.
            if A.ndim == 1:
                return run_quicksort1(A)
            else:
                for idx in np.ndindex(A.shape[:-1]):
                    run_quicksort1(A[idx])
                return A
    else:
        @wrap
        def run_quicksort(A):
            return run_quicksort1(A)

    # Unused quicksort implementation based on 3-way partitioning; the
    # partitioning scheme turns out exhibiting bad behaviour on sorted arrays.
    # NOTE(review): this dead-code path calls insertion_sort with 3 args
    # while insertion_sort takes 4 (A, R, low, high) -- would fail if ever
    # revived; confirm before reuse.
    @wrap
    def _run_quicksort(A):
        stack = [Partition(zero, zero)] * 100
        stack[0] = Partition(zero, len(A) - 1)
        n = 1

        while n > 0:
            n -= 1
            low, high = stack[n]
            # Partition until it becomes more efficient to do an insertion sort
            while high - low >= SMALL_QUICKSORT:
                assert n < MAX_STACK
                l, r = partition3(A, low, high)
                # One trivial (empty) partition => iterate on the other
                if r == high:
                    high = l - 1
                elif l == low:
                    low = r + 1
                # Push largest partition on the stack
                elif high - r > l - low:
                    # Right is larger
                    stack[n] = Partition(r + 1, high)
                    n += 1
                    high = l - 1
                else:
                    stack[n] = Partition(low, l - 1)
                    n += 1
                    low = r + 1

            insertion_sort(A, low, high)


    return QuicksortImplementation(wrap,
                                   partition, partition3, insertion_sort,
                                   run_quicksort)
253
+
254
+
255
def make_py_quicksort(*args, **kwargs):
    """Build a pure-Python QuicksortImplementation (no jit wrapping)."""
    def passthrough(f):
        # Identity wrapper: subroutines are used as plain Python functions.
        return f
    return make_quicksort_impl(passthrough, *args, **kwargs)
257
+
258
def make_jit_quicksort(*args, **kwargs):
    """Build a QuicksortImplementation whose subroutines are jit-registered."""
    from numba.core.extending import register_jitable

    def jit_wrap(f):
        # Each subroutine becomes callable from jitted code.
        return register_jitable(f)

    return make_quicksort_impl(jit_wrap, *args, **kwargs)
venv/lib/python3.10/site-packages/numba/misc/special.py ADDED
@@ -0,0 +1,104 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+
3
+ from numba.core.typing.typeof import typeof
4
+ from numba.core.typing.asnumbatype import as_numba_type
5
+
6
+
7
def pndindex(*args):
    """Return an n-dimensional parallel iterator that yields index tuples
    for each iteration point.

    When executed sequentially this is exactly ``np.ndindex``.
    """
    shape = args
    return np.ndindex(*shape)
14
class prange(object):
    """A 1D parallel iterator producing a sequence of integers.

    Outside of a parallel context this behaves exactly like ``range``.
    """
    def __new__(cls, *args):
        # Delegate entirely to the builtin; no ``prange`` instance is
        # ever actually created.
        return range(*args)
22
def _gdb_python_call_gen(func_name, *args):
    """Build a jitted thunk invoking ``numba.<func_name>`` with *args*.

    The gdb command strings must be compiled in, so a tiny function is
    synthesised with the arguments baked in as string literals and then
    njit-compiled; this is what makes ``numba.gdb*`` usable from the
    interpreter.
    """
    import numba
    target = getattr(numba, func_name)
    # Render each argument as a quoted literal, comma separated.
    placeholders = ['"%s"' for _ in args]
    argstr = ','.join(placeholders) % args
    defn = """def _gdb_func_injection():\n\t%s(%s)\n
    """ % (func_name, argstr)
    scope = {}
    exec(defn, {func_name: target}, scope)
    return numba.njit(scope['_gdb_func_injection'])
35
def gdb(*args):
    """
    Invoke gdb and attach it to the current process at the call site.

    *args* are strings in the gdb command language syntax which are
    executed by gdb once initialisation has occurred.
    """
    thunk = _gdb_python_call_gen('gdb', *args)
    thunk()
44
def gdb_breakpoint():
    """
    Inject a breakpoint at the call site that is recognised by both
    ``gdb`` and ``gdb_init``, allowing breaking at multiple points.
    gdb stops in the user code just after the frame employed by the
    breakpoint returns.
    """
    thunk = _gdb_python_call_gen('gdb_breakpoint')
    thunk()
53
+
54
def gdb_init(*args):
    """
    Invoke gdb, attach it to the current process at the call site, then
    continue executing the process under gdb's control.

    *args* are strings in the gdb command language syntax which are
    executed by gdb once initialisation has occurred.
    """
    thunk = _gdb_python_call_gen('gdb_init', *args)
    thunk()
64
def literally(obj):
    """Force Numba to interpret *obj* as a Literal value.

    *obj* must be either a literal or an argument of the caller function,
    where the argument must be bound to a literal.  The literal
    requirement propagates up the call stack.

    The compiler intercepts this function and alters compilation to wrap
    the corresponding function parameters as ``Literal``.  It has
    **no effect** outside of nopython-mode (interpreter, and objectmode).

    Literal arguments are currently detected in two ways:

    1. A compiler pass scans for uses of ``literally``.
    2. ``literally`` is overloaded to raise
       ``numba.errors.ForceLiteralArg`` to signal the dispatcher to treat
       the corresponding parameter differently.  This mode supports
       indirect use (via a function call).

    At execution time this is simply an identity function.

    See :ghfile:`numba/tests/test_literal_dispatch.py` for examples.
    """
    return obj
90
def literal_unroll(container):
    """Return *container* unchanged (identity at runtime).

    NOTE(review): presumably intercepted by the compiler in nopython mode
    (like ``literally`` above) to enable loop unrolling over the
    container -- confirm against the compiler passes; nothing in this
    module establishes that behavior.
    """
    return container
94
# Public API of this module.
__all__ = [
    'typeof',
    'as_numba_type',
    'prange',
    'pndindex',
    'gdb',
    'gdb_breakpoint',
    'gdb_init',
    'literally',
    'literal_unroll',
]
venv/lib/python3.10/site-packages/numba/misc/timsort.py ADDED
@@ -0,0 +1,943 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Timsort implementation. Mostly adapted from CPython's listobject.c.
3
+
4
+ For more information, see listsort.txt in CPython's source tree.
5
+ """
6
+
7
+
8
+ import collections
9
+
10
+ from numba.core import types
11
+
12
+
13
# Bundle of all subroutines produced by make_timsort_impl, so tests can
# exercise each piece individually.
TimsortImplementation = collections.namedtuple(
    'TimsortImplementation',
    (# The compile function itself
     'compile',
     # All subroutines exercised by test_sort
     'count_run', 'binarysort', 'gallop_left', 'gallop_right',
     'merge_init', 'merge_append', 'merge_pop',
     'merge_compute_minrun', 'merge_lo', 'merge_hi', 'merge_at',
     'merge_force_collapse', 'merge_collapse',
     # The top-level functions
     'run_timsort', 'run_timsort_with_values'
     ))


# The maximum number of entries in a MergeState's pending-runs stack.
# This is enough to sort arrays of size up to about
#     32 * phi ** MAX_MERGE_PENDING
# where phi ~= 1.618.  85 is ridiculously large enough, good for an array
# with 2**64 elements.
# NOTE this implementation doesn't depend on it (the stack is dynamically
# allocated), but it's still good to check as an invariant.
MAX_MERGE_PENDING = 85

# When we get into galloping mode, we stay there until both runs win less
# often than MIN_GALLOP consecutive times.  See listsort.txt for more info.
MIN_GALLOP = 7

# Start size for temp arrays.
MERGESTATE_TEMP_SIZE = 256

# A mergestate is a named tuple with the following members:
#  - *min_gallop* is an integer controlling when we get into galloping mode
#  - *keys* is a temp list for merging keys
#  - *values* is a temp list for merging values, if needed
#  - *pending* is a stack of pending runs to be merged
#  - *n* is the current stack length of *pending*

MergeState = collections.namedtuple(
    'MergeState', ('min_gallop', 'keys', 'values', 'pending', 'n'))

# A pending run: start index and length of a sorted stretch of the keys.
MergeRun = collections.namedtuple('MergeRun', ('start', 'size'))
57
+ def make_timsort_impl(wrap, make_temp_area):
58
+
59
+ make_temp_area = wrap(make_temp_area)
60
+ intp = types.intp
61
+ zero = intp(0)
62
+
63
@wrap
def has_values(keys, values):
    """Tell whether a distinct values array accompanies the keys.

    A non-keyed sort passes the same object for both, so an identity
    check suffices.
    """
    return not (values is keys)
67
@wrap
def merge_init(keys):
    """
    Initialize a MergeState for a non-keyed sort.
    """
    # Pre-allocate the pending-runs stack to its maximum depth.
    pending = [MergeRun(zero, zero)] * MAX_MERGE_PENDING
    temp_size = min(len(keys) // 2 + 1, MERGESTATE_TEMP_SIZE)
    temp_keys = make_temp_area(keys, temp_size)
    # No separate values: alias the keys temp area.
    return MergeState(intp(MIN_GALLOP), temp_keys, temp_keys, pending, zero)
78
@wrap
def merge_init_with_values(keys, values):
    """
    Initialize a MergeState for a keyed sort.
    """
    # Pre-allocate the pending-runs stack to its maximum depth.
    pending = [MergeRun(zero, zero)] * MAX_MERGE_PENDING
    temp_size = min(len(keys) // 2 + 1, MERGESTATE_TEMP_SIZE)
    temp_keys = make_temp_area(keys, temp_size)
    temp_values = make_temp_area(values, temp_size)
    return MergeState(intp(MIN_GALLOP), temp_keys, temp_values, pending, zero)
89
@wrap
def merge_append(ms, run):
    """
    Push *run* onto the merge stack; return the updated MergeState.
    """
    count = ms.n
    assert count < MAX_MERGE_PENDING
    ms.pending[count] = run
    return MergeState(ms.min_gallop, ms.keys, ms.values, ms.pending,
                      count + 1)
99
@wrap
def merge_pop(ms):
    """
    Drop the top run from the merge stack; return the updated MergeState.
    """
    return MergeState(ms.min_gallop, ms.keys, ms.values, ms.pending,
                      ms.n - 1)
106
@wrap
def merge_getmem(ms, need):
    """
    Ensure the temp areas can hold at least *need* items; return a
    MergeState with sufficiently large temp arrays.
    """
    current = len(ms.keys)
    if need <= current:
        return ms
    # Grow by doubling to amortize reallocations.
    while current < need:
        current <<= 1
    # Fresh allocations -- copying the old contents would waste cycles
    # since we don't care what's in the block.
    new_keys = make_temp_area(ms.keys, current)
    if has_values(ms.keys, ms.values):
        new_values = make_temp_area(ms.values, current)
    else:
        new_values = new_keys
    return MergeState(ms.min_gallop, new_keys, new_values, ms.pending, ms.n)
126
@wrap
def merge_adjust_gallop(ms, new_gallop):
    """
    Return a MergeState whose min_gallop is replaced by *new_gallop*.
    """
    return MergeState(intp(new_gallop), ms.keys, ms.values,
                      ms.pending, ms.n)
134
@wrap
def LT(a, b):
    """
    Strict less-than between two keys.  All orderings are routed through
    here so that comparison sites are easy to locate.
    """
    result = a < b
    return result
142
@wrap
def binarysort(keys, values, lo, hi, start):
    """
    binarysort is the best method for sorting small arrays: it does
    few compares, but can do data movement quadratic in the number of
    elements.
    [lo, hi) is a contiguous slice of a list, and is sorted via
    binary insertion.  This sort is stable.
    On entry, must have lo <= start <= hi, and that [lo, start) is already
    sorted (pass start == lo if you don't know!).
    """
    assert lo <= start and start <= hi
    _has_values = has_values(keys, values)
    if lo == start:
        start += 1
    while start < hi:
        pivot = keys[start]
        # Bisect to find where to insert `pivot`
        # NOTE: bisection only wins over linear search if the comparison
        # function is much more expensive than simply moving data.
        l = lo
        r = start
        # Invariants:
        # pivot >= all in [lo, l).
        # pivot  < all in [r, start).
        # The second is vacuously true at the start.
        while l < r:
            p = l + ((r - l) >> 1)
            if LT(pivot, keys[p]):
                r = p
            else:
                l = p+1

        # The invariants still hold, so pivot >= all in [lo, l) and
        # pivot < all in [l, start), so pivot belongs at l.  Note
        # that if there are elements equal to pivot, l points to the
        # first slot after them -- that's why this sort is stable.
        # Slide over to make room (aka memmove()).
        for p in range(start, l, -1):
            keys[p] = keys[p - 1]
        keys[l] = pivot
        if _has_values:
            # Rotate the companion value identically so the key/value
            # pairing is preserved.
            pivot_val = values[start]
            for p in range(start, l, -1):
                values[p] = values[p - 1]
            values[l] = pivot_val

        start += 1
192
@wrap
def count_run(keys, lo, hi):
    """
    Return the length of the run beginning at lo, in the slice [lo, hi).
    lo < hi is required on entry.  "A run" is the longest ascending sequence, with

        lo[0] <= lo[1] <= lo[2] <= ...

    or the longest descending sequence, with

        lo[0] > lo[1] > lo[2] > ...

    A tuple (length, descending) is returned, where boolean *descending*
    is set to 0 in the former case, or to 1 in the latter.
    For its intended use in a stable mergesort, the strictness of the defn of
    "descending" is needed so that the caller can safely reverse a descending
    sequence without violating stability (strict > ensures there are no equal
    elements to get out of order).

    The returned length is always >= 1.
    """
    assert lo < hi
    if lo + 1 == hi:
        # Trivial 1-long run
        return 1, False
    if LT(keys[lo + 1], keys[lo]):
        # Descending run
        for k in range(lo + 2, hi):
            if not LT(keys[k], keys[k - 1]):
                return k - lo, True
        return hi - lo, True
    else:
        # Ascending run
        for k in range(lo + 2, hi):
            if LT(keys[k], keys[k - 1]):
                return k - lo, False
        return hi - lo, False
229
@wrap
def gallop_left(key, a, start, stop, hint):
    """
    Locate the proper position of key in a sorted vector; if the vector
    contains an element equal to key, return the position immediately to
    the left of the leftmost equal element.  [gallop_right() does the same
    except returns the position to the right of the rightmost equal
    element (if any).]

    "a" is a sorted vector with stop elements, starting at a[start].
    stop must be > start.

    "hint" is an index at which to begin the search, start <= hint < stop.
    The closer hint is to the final result, the faster this runs.

    The return value is the int k in start..stop such that

        a[k-1] < key <= a[k]

    pretending that a[start-1] is minus infinity and a[stop] is plus
    infinity.  IOW, key belongs at index k; or, IOW, the first k elements
    of a should precede key, and the last stop-start-k should follow key.

    See listsort.txt for info on the method.
    """
    assert stop > start
    assert hint >= start and hint < stop
    # (Fix: removed an unused local ``n = stop - start`` -- it was never
    # read anywhere in this function.)

    # First, gallop from the hint to find a "good" subinterval for bisecting
    lastofs = 0
    ofs = 1
    if LT(a[hint], key):
        # a[hint] < key => gallop right, until
        #                  a[hint + lastofs] < key <= a[hint + ofs]
        maxofs = stop - hint
        while ofs < maxofs:
            if LT(a[hint + ofs], key):
                lastofs = ofs
                ofs = (ofs << 1) + 1
                if ofs <= 0:
                    # Int overflow
                    ofs = maxofs
            else:
                # key <= a[hint + ofs]
                break
        if ofs > maxofs:
            ofs = maxofs
        # Translate back to offsets relative to a[0]
        lastofs += hint
        ofs += hint
    else:
        # key <= a[hint] => gallop left, until
        #                   a[hint - ofs] < key <= a[hint - lastofs]
        maxofs = hint - start + 1
        while ofs < maxofs:
            if LT(a[hint - ofs], key):
                break
            else:
                # key <= a[hint - ofs]
                lastofs = ofs
                ofs = (ofs << 1) + 1
                if ofs <= 0:
                    # Int overflow
                    ofs = maxofs
        if ofs > maxofs:
            ofs = maxofs
        # Translate back to positive offsets relative to a[0]
        lastofs, ofs = hint - ofs, hint - lastofs

    assert start - 1 <= lastofs and lastofs < ofs and ofs <= stop
    # Now a[lastofs] < key <= a[ofs], so key belongs somewhere to the
    # right of lastofs but no farther right than ofs.  Do a binary
    # search, with invariant a[lastofs-1] < key <= a[ofs].
    lastofs += 1
    while lastofs < ofs:
        m = lastofs + ((ofs - lastofs) >> 1)
        if LT(a[m], key):
            # a[m] < key
            lastofs = m + 1
        else:
            # key <= a[m]
            ofs = m
    # Now lastofs == ofs, so a[ofs - 1] < key <= a[ofs]
    return ofs
315
@wrap
def gallop_right(key, a, start, stop, hint):
    """
    Exactly like gallop_left(), except that if key already exists in
    a[start:stop], finds the position immediately to the right of the
    rightmost equal value.

    The return value is the int k in start..stop such that

        a[k-1] <= key < a[k]

    The code duplication is massive, but this is enough different given that
    we're sticking to "<" comparisons that it's much harder to follow if
    written as one routine with yet another "left or right?" flag.
    """
    assert stop > start
    assert hint >= start and hint < stop
    # (Fix: removed an unused local ``n = stop - start`` -- it was never
    # read anywhere in this function.)

    # First, gallop from the hint to find a "good" subinterval for bisecting
    lastofs = 0
    ofs = 1
    if LT(key, a[hint]):
        # key < a[hint] => gallop left, until
        #                  a[hint - ofs] <= key < a[hint - lastofs]
        maxofs = hint - start + 1
        while ofs < maxofs:
            if LT(key, a[hint - ofs]):
                lastofs = ofs
                ofs = (ofs << 1) + 1
                if ofs <= 0:
                    # Int overflow
                    ofs = maxofs
            else:
                # a[hint - ofs] <= key
                break
        if ofs > maxofs:
            ofs = maxofs
        # Translate back to positive offsets relative to a[0]
        lastofs, ofs = hint - ofs, hint - lastofs
    else:
        # a[hint] <= key -- gallop right, until
        #                   a[hint + lastofs] <= key < a[hint + ofs]
        maxofs = stop - hint
        while ofs < maxofs:
            if LT(key, a[hint + ofs]):
                break
            else:
                # a[hint + ofs] <= key
                lastofs = ofs
                ofs = (ofs << 1) + 1
                if ofs <= 0:
                    # Int overflow
                    ofs = maxofs
        if ofs > maxofs:
            ofs = maxofs
        # Translate back to offsets relative to a[0]
        lastofs += hint
        ofs += hint

    assert start - 1 <= lastofs and lastofs < ofs and ofs <= stop
    # Now a[lastofs] <= key < a[ofs], so key belongs somewhere to the
    # right of lastofs but no farther right than ofs.  Do a binary
    # search, with invariant a[lastofs-1] <= key < a[ofs].
    lastofs += 1
    while lastofs < ofs:
        m = lastofs + ((ofs - lastofs) >> 1)
        if LT(key, a[m]):
            # key < a[m]
            ofs = m
        else:
            # a[m] <= key
            lastofs = m + 1
    # Now lastofs == ofs, so a[ofs - 1] <= key < a[ofs]
    return ofs
391
@wrap
def merge_compute_minrun(n):
    """
    Compute a good value for the minimum run length; natural runs shorter
    than this are boosted artificially via binary insertion.

    If n < 64, return n (it's too small to bother with fancy stuff).
    Else if n is an exact power of 2, return 32.
    Else return an int k, 32 <= k <= 64, such that n/k is close to, but
    strictly less than, an exact power of 2.

    See listsort.txt for more info.
    """
    assert n >= 0
    # *carry* becomes 1 as soon as any bit is shifted off the low end.
    carry = 0
    while n >= 64:
        carry |= n & 1
        n >>= 1
    return n + carry
412
@wrap
def sortslice_copy(dest_keys, dest_values, dest_start,
                   src_keys, src_values, src_start,
                   nitems):
    """
    Copy *nitems* entries in ascending index order (upwards memcpy()),
    keys first, then values when a distinct values array is present.
    """
    assert src_start >= 0
    assert dest_start >= 0
    for off in range(nitems):
        dest_keys[dest_start + off] = src_keys[src_start + off]
    if has_values(src_keys, src_values):
        for off in range(nitems):
            dest_values[dest_start + off] = src_values[src_start + off]
427
@wrap
def sortslice_copy_down(dest_keys, dest_values, dest_start,
                        src_keys, src_values, src_start,
                        nitems):
    """
    Copy *nitems* entries in descending index order (downwards memcpy()),
    keys first, then values when a distinct values array is present.
    """
    assert src_start >= 0
    assert dest_start >= 0
    for off in range(nitems):
        dest_keys[dest_start - off] = src_keys[src_start - off]
    if has_values(src_keys, src_values):
        for off in range(nitems):
            dest_values[dest_start - off] = src_values[src_start - off]
443
# Disable this for debug or perf comparison
# (when falsy, merge_lo/merge_hi below skip their galloping branch and
# always advance one element at a time).
DO_GALLOP = 1
446
@wrap
def merge_lo(ms, keys, values, ssa, na, ssb, nb):
    """
    Merge the na elements starting at ssa with the nb elements starting at
    ssb = ssa + na in a stable way, in-place.  na and nb must be > 0,
    and should have na <= nb.  See listsort.txt for more info.

    An updated MergeState is returned (with possibly a different min_gallop
    or larger temp arrays).

    NOTE: compared to CPython's timsort, the requirement that
        "Must also have that keys[ssa + na - 1] belongs at the end of the merge"
    is removed. This makes the code a bit simpler and easier to reason about.
    """
    assert na > 0 and nb > 0 and na <= nb
    assert ssb == ssa + na
    # First copy [ssa, ssa + na) into the temp space
    # (run A lives in the temp arrays from here on; run B stays in place).
    ms = merge_getmem(ms, na)
    sortslice_copy(ms.keys, ms.values, 0,
                   keys, values, ssa,
                   na)
    a_keys = ms.keys
    a_values = ms.values
    b_keys = keys
    b_values = values
    dest = ssa
    ssa = 0

    _has_values = has_values(a_keys, a_values)
    min_gallop = ms.min_gallop

    # Now start merging into the space left from [ssa, ...)

    while nb > 0 and na > 0:
        # Do the straightforward thing until (if ever) one run
        # appears to win consistently.
        acount = 0
        bcount = 0

        while True:
            if LT(b_keys[ssb], a_keys[ssa]):
                keys[dest] = b_keys[ssb]
                if _has_values:
                    values[dest] = b_values[ssb]
                dest += 1
                ssb += 1
                nb -= 1
                if nb == 0:
                    break
                # It's a B run
                bcount += 1
                acount = 0
                if bcount >= min_gallop:
                    break
            else:
                keys[dest] = a_keys[ssa]
                if _has_values:
                    values[dest] = a_values[ssa]
                dest += 1
                ssa += 1
                na -= 1
                if na == 0:
                    break
                # It's a A run
                acount += 1
                bcount = 0
                if acount >= min_gallop:
                    break

        # One run is winning so consistently that galloping may
        # be a huge win.  So try that, and continue galloping until
        # (if ever) neither run appears to be winning consistently
        # anymore.
        if DO_GALLOP and na > 0 and nb > 0:
            min_gallop += 1

            while acount >= MIN_GALLOP or bcount >= MIN_GALLOP:
                # As long as we gallop without leaving this loop, make
                # the heuristic more likely
                min_gallop -= min_gallop > 1

                # Gallop in A to find where keys[ssb] should end up
                k = gallop_right(b_keys[ssb], a_keys, ssa, ssa + na, ssa)
                # k is an index, make it a size
                k -= ssa
                acount = k
                if k > 0:
                    # Copy everything from A before k
                    sortslice_copy(keys, values, dest,
                                   a_keys, a_values, ssa,
                                   k)
                    dest += k
                    ssa += k
                    na -= k
                    if na == 0:
                        # Finished merging
                        break
                # Copy keys[ssb]
                keys[dest] = b_keys[ssb]
                if _has_values:
                    values[dest] = b_values[ssb]
                dest += 1
                ssb += 1
                nb -= 1
                if nb == 0:
                    # Finished merging
                    break

                # Gallop in B to find where keys[ssa] should end up
                k = gallop_left(a_keys[ssa], b_keys, ssb, ssb + nb, ssb)
                # k is an index, make it a size
                k -= ssb
                bcount = k
                if k > 0:
                    # Copy everything from B before k
                    # NOTE: source and dest are the same buffer, but the
                    # destination index is below the source index
                    sortslice_copy(keys, values, dest,
                                   b_keys, b_values, ssb,
                                   k)
                    dest += k
                    ssb += k
                    nb -= k
                    if nb == 0:
                        # Finished merging
                        break
                # Copy keys[ssa]
                keys[dest] = a_keys[ssa]
                if _has_values:
                    values[dest] = a_values[ssa]
                dest += 1
                ssa += 1
                na -= 1
                if na == 0:
                    # Finished merging
                    break

            # Penalize it for leaving galloping mode
            min_gallop += 1

    # Merge finished, now handle the remaining areas
    if nb == 0:
        # Only A remaining to copy at the end of the destination area
        sortslice_copy(keys, values, dest,
                       a_keys, a_values, ssa,
                       na)
    else:
        assert na == 0
        assert dest == ssb
        # B's tail is already at the right place, do nothing

    return merge_adjust_gallop(ms, min_gallop)
601
@wrap
def merge_hi(ms, keys, values, ssa, na, ssb, nb):
    """
    Merge the na elements starting at ssa with the nb elements starting at
    ssb = ssa + na in a stable way, in-place.  na and nb must be > 0,
    and should have na >= nb.  See listsort.txt for more info.

    An updated MergeState is returned (with possibly a different min_gallop
    or larger temp arrays).

    NOTE: compared to CPython's timsort, the requirement that
        "Must also have that keys[ssa + na - 1] belongs at the end of the merge"
    is removed. This makes the code a bit simpler and easier to reason about.
    """
    assert na > 0 and nb > 0 and na >= nb
    assert ssb == ssa + na
    # First copy [ssb, ssb + nb) into the temp space
    # (run B lives in the temp arrays from here on; run A stays in place).
    ms = merge_getmem(ms, nb)
    sortslice_copy(ms.keys, ms.values, 0,
                   keys, values, ssb,
                   nb)
    a_keys = keys
    a_values = values
    b_keys = ms.keys
    b_values = ms.values

    # Now start merging *in descending order* into the space left
    # from [..., ssb + nb).
    dest = ssb + nb - 1
    ssb = nb - 1
    ssa = ssa + na - 1

    _has_values = has_values(b_keys, b_values)
    min_gallop = ms.min_gallop

    while nb > 0 and na > 0:
        # Do the straightforward thing until (if ever) one run
        # appears to win consistently.
        acount = 0
        bcount = 0

        while True:
            if LT(b_keys[ssb], a_keys[ssa]):
                # We merge in descending order, so copy the larger value
                keys[dest] = a_keys[ssa]
                if _has_values:
                    values[dest] = a_values[ssa]
                dest -= 1
                ssa -= 1
                na -= 1
                if na == 0:
                    break
                # It's a A run
                acount += 1
                bcount = 0
                if acount >= min_gallop:
                    break
            else:
                keys[dest] = b_keys[ssb]
                if _has_values:
                    values[dest] = b_values[ssb]
                dest -= 1
                ssb -= 1
                nb -= 1
                if nb == 0:
                    break
                # It's a B run
                bcount += 1
                acount = 0
                if bcount >= min_gallop:
                    break

        # One run is winning so consistently that galloping may
        # be a huge win.  So try that, and continue galloping until
        # (if ever) neither run appears to be winning consistently
        # anymore.
        if DO_GALLOP and na > 0 and nb > 0:
            min_gallop += 1

            while acount >= MIN_GALLOP or bcount >= MIN_GALLOP:
                # As long as we gallop without leaving this loop, make
                # the heuristic more likely
                min_gallop -= min_gallop > 1

                # Gallop in A to find where keys[ssb] should end up
                k = gallop_right(b_keys[ssb], a_keys, ssa - na + 1, ssa + 1, ssa)
                # k is an index, make it a size from the end
                k = ssa + 1 - k
                acount = k
                if k > 0:
                    # Copy everything from A after k.
                    # Destination and source are the same buffer, and destination
                    # index is greater, so copy from the end to the start.
                    sortslice_copy_down(keys, values, dest,
                                        a_keys, a_values, ssa,
                                        k)
                    dest -= k
                    ssa -= k
                    na -= k
                    if na == 0:
                        # Finished merging
                        break
                # Copy keys[ssb]
                keys[dest] = b_keys[ssb]
                if _has_values:
                    values[dest] = b_values[ssb]
                dest -= 1
                ssb -= 1
                nb -= 1
                if nb == 0:
                    # Finished merging
                    break

                # Gallop in B to find where keys[ssa] should end up
                k = gallop_left(a_keys[ssa], b_keys, ssb - nb + 1, ssb + 1, ssb)
                # k is an index, make it a size from the end
                k = ssb + 1 - k
                bcount = k
                if k > 0:
                    # Copy everything from B before k
                    sortslice_copy_down(keys, values, dest,
                                        b_keys, b_values, ssb,
                                        k)
                    dest -= k
                    ssb -= k
                    nb -= k
                    if nb == 0:
                        # Finished merging
                        break
                # Copy keys[ssa]
                keys[dest] = a_keys[ssa]
                if _has_values:
                    values[dest] = a_values[ssa]
                dest -= 1
                ssa -= 1
                na -= 1
                if na == 0:
                    # Finished merging
                    break

            # Penalize it for leaving galloping mode
            min_gallop += 1

    # Merge finished, now handle the remaining areas
    if na == 0:
        # Only B remaining to copy at the front of the destination area
        sortslice_copy(keys, values, dest - nb + 1,
                       b_keys, b_values, ssb - nb + 1,
                       nb)
    else:
        assert nb == 0
        assert dest == ssa
        # A's front is already at the right place, do nothing

    return merge_adjust_gallop(ms, min_gallop)
759
@wrap
def merge_at(ms, keys, values, i):
    """
    Merge the two runs at stack indices i and i+1.

    An updated MergeState is returned.
    """
    n = ms.n
    assert n >= 2
    assert i >= 0
    assert i == n - 2 or i == n - 3

    ssa, na = ms.pending[i]
    ssb, nb = ms.pending[i + 1]
    assert na > 0 and nb > 0
    assert ssa + na == ssb

    # Record the length of the combined runs; if i is the 3rd-last
    # run now, also slide over the last run (which isn't involved
    # in this merge).  The current run i+1 goes away in any case.
    ms.pending[i] = MergeRun(ssa, na + nb)
    if i == n - 3:
        ms.pending[i + 1] = ms.pending[i + 2]
    ms = merge_pop(ms)

    # Where does b start in a?  Elements in a before that can be
    # ignored (already in place).
    k = gallop_right(keys[ssb], keys, ssa, ssa + na, ssa)
    # [k, ssa + na) remains to be merged
    na -= k - ssa
    ssa = k
    if na == 0:
        # All of a precedes b: nothing left to move.
        return ms

    # Where does a end in b?  Elements in b after that can be
    # ignored (already in place).
    k = gallop_left(keys[ssa + na - 1], keys, ssb, ssb + nb, ssb + nb - 1)
    # [ssb, k) remains to be merged
    nb = k - ssb

    # Merge what remains of the runs, using a temp array with
    # min(na, nb) elements.
    if na <= nb:
        return merge_lo(ms, keys, values, ssa, na, ssb, nb)
    else:
        return merge_hi(ms, keys, values, ssa, na, ssb, nb)
807
@wrap
def merge_collapse(ms, keys, values):
    """
    Examine the stack of runs waiting to be merged, merging adjacent runs
    until the stack invariants are re-established:

    1. len[-3] > len[-2] + len[-1]
    2. len[-2] > len[-1]

    An updated MergeState is returned.

    See listsort.txt for more info.
    """
    while ms.n > 1:
        pending = ms.pending
        # n indexes the second-from-top run on the stack.
        n = ms.n - 2
        if ((n > 0 and pending[n-1].size <= pending[n].size + pending[n+1].size) or
            (n > 1 and pending[n-2].size <= pending[n-1].size + pending[n].size)):
            if pending[n - 1].size < pending[n + 1].size:
                # Merge smaller one first
                n -= 1
            ms = merge_at(ms, keys, values, n)
        elif pending[n].size < pending[n + 1].size:
            ms = merge_at(ms, keys, values, n)
        else:
            break
    return ms
835
@wrap
def merge_force_collapse(ms, keys, values):
    """
    Regardless of invariants, merge all runs on the stack until only one
    remains. This is used at the end of the mergesort.

    An updated MergeState is returned.
    """
    while ms.n > 1:
        pending = ms.pending
        # n indexes the second-from-top run on the stack.
        n = ms.n - 2
        if n > 0:
            if pending[n - 1].size < pending[n + 1].size:
                # Merge the smaller one first
                n -= 1
        ms = merge_at(ms, keys, values, n)
    return ms
854
@wrap
def reverse_slice(keys, values, start, stop):
    """
    Reverse keys[start:stop] (and the matching values, when present)
    in place.
    """
    left, right = start, stop - 1
    while left < right:
        keys[left], keys[right] = keys[right], keys[left]
        left += 1
        right -= 1
    if has_values(keys, values):
        left, right = start, stop - 1
        while left < right:
            values[left], values[right] = values[right], values[left]
            left += 1
            right -= 1
874
@wrap
def run_timsort_with_mergestate(ms, keys, values):
    """
    Run timsort with the mergestate.

    Sorts *keys* in-place (and *values* alongside, when a distinct values
    array is in use).  *ms* is the MergeState holding the stack of pending
    runs.
    """
    nremaining = len(keys)
    # 0- or 1-element arrays are already sorted.
    if nremaining < 2:
        return

    # March over the array once, left to right, finding natural runs,
    # and extending short natural runs to minrun elements.
    minrun = merge_compute_minrun(nremaining)

    lo = zero
    while nremaining > 0:
        # count_run returns the length of the run starting at `lo` and
        # whether it is descending.
        n, desc = count_run(keys, lo, lo + nremaining)
        if desc:
            # Descending run => reverse
            reverse_slice(keys, values, lo, lo + n)
        # If short, extend to min(minrun, nremaining)
        if n < minrun:
            force = min(minrun, nremaining)
            # binarysort extends the sorted prefix [lo, lo+n) to
            # [lo, lo+force) by binary insertion.
            binarysort(keys, values, lo, lo + force, lo + n)
            n = force
        # Push run onto stack, and maybe merge.
        ms = merge_append(ms, MergeRun(lo, n))
        ms = merge_collapse(ms, keys, values)
        # Advance to find next run.
        lo += n
        nremaining -= n

    # All initial runs have been discovered, now finish merging.
    ms = merge_force_collapse(ms, keys, values)
    # Exactly one run must remain, covering the whole array.
    assert ms.n == 1
    assert ms.pending[0] == (0, len(keys))
909
+
910
+
911
@wrap
def run_timsort(keys):
    """
    Run timsort over the given keys.
    """
    # No separate values payload: pass the keys array itself in the
    # values slot.
    ms = merge_init(keys)
    run_timsort_with_mergestate(ms, keys, keys)
918
+
919
+
920
@wrap
def run_timsort_with_values(keys, values):
    """
    Run timsort over the given keys and values.
    """
    ms = merge_init_with_values(keys, values)
    run_timsort_with_mergestate(ms, keys, values)
927
+
928
    # Bundle all (possibly jit-wrapped) helpers into the implementation
    # object returned by the enclosing factory.  NOTE(review):
    # TimsortImplementation is defined elsewhere in this file — presumably
    # a namedtuple whose field order matches this argument order; verify
    # against its definition before reordering anything here.
    return TimsortImplementation(
        wrap,
        count_run, binarysort, gallop_left, gallop_right,
        merge_init, merge_append, merge_pop,
        merge_compute_minrun, merge_lo, merge_hi, merge_at,
        merge_force_collapse, merge_collapse,
        run_timsort, run_timsort_with_values)
935
+
936
+
937
def make_py_timsort(*args):
    """
    Build a pure-Python timsort implementation: every helper is used
    as-is (identity wrapper).
    """
    def identity(f):
        return f
    return make_timsort_impl(identity, *args)
939
+
940
def make_jit_timsort(*args):
    """
    Build a timsort implementation whose helpers are each compiled in
    numba nopython mode.
    """
    from numba import jit

    def compile_nopython(f):
        # Wrap each helper in a nopython-mode dispatcher.
        return jit(nopython=True)(f)

    return make_timsort_impl(compile_nopython, *args)